* [PATCH virtio v2 01/13] virtio_pci: push out single vq find code to vp_find_one_vq_msix()
2024-07-10 6:35 [PATCH virtio v2 00/13] virtio_pci_modern: allow parallel admin queue commands execution Jiri Pirko
@ 2024-07-10 6:35 ` Jiri Pirko
2024-07-10 6:35 ` [PATCH virtio v2 02/13] virtio_pci: simplify vp_request_msix_vectors() call a bit Jiri Pirko
` (11 subsequent siblings)
12 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-07-10 6:35 UTC (permalink / raw)
To: virtualization; +Cc: mst, jasowang, xuanzhuo, eperezma, parav, feliu, hengqi
From: Jiri Pirko <jiri@nvidia.com>
In order to be reusable for admin queue setup, push out the common code
to set up and configure the irq for one vq into a separate helper.
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
---
v1->v2:
- removed pointless "++" from vp_find_one_vq_msix()
- rebased on top of the find_vqs() arg conversion patchset
---
drivers/virtio/virtio_pci_common.c | 68 ++++++++++++++++++------------
1 file changed, 41 insertions(+), 27 deletions(-)
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 7d82facafd75..277591a6ab96 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -284,6 +284,44 @@ void vp_del_vqs(struct virtio_device *vdev)
vp_dev->vqs = NULL;
}
+static struct virtqueue *vp_find_one_vq_msix(struct virtio_device *vdev,
+ int queue_idx,
+ vq_callback_t *callback,
+ const char *name, bool ctx,
+ int *allocated_vectors)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtqueue *vq;
+ u16 msix_vec;
+ int err;
+
+ if (!callback)
+ msix_vec = VIRTIO_MSI_NO_VECTOR;
+ else if (vp_dev->per_vq_vectors)
+ msix_vec = (*allocated_vectors)++;
+ else
+ msix_vec = VP_MSIX_VQ_VECTOR;
+ vq = vp_setup_vq(vdev, queue_idx, callback, name, ctx, msix_vec);
+ if (IS_ERR(vq))
+ return vq;
+
+ if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
+ return vq;
+
+ /* allocate per-vq irq if available and necessary */
+ snprintf(vp_dev->msix_names[msix_vec], sizeof(*vp_dev->msix_names),
+ "%s-%s", dev_name(&vp_dev->vdev.dev), name);
+ err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
+ vring_interrupt, 0,
+ vp_dev->msix_names[msix_vec], vq);
+ if (err) {
+ vp_del_vq(vq);
+ return ERR_PTR(err);
+ }
+
+ return vq;
+}
+
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
struct virtqueue_info vqs_info[],
@@ -292,7 +330,6 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtqueue_info *vqi;
- u16 msix_vec;
int i, err, nvectors, allocated_vectors, queue_idx = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
@@ -325,36 +362,13 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
vqs[i] = NULL;
continue;
}
-
- if (!vqi->callback)
- msix_vec = VIRTIO_MSI_NO_VECTOR;
- else if (vp_dev->per_vq_vectors)
- msix_vec = allocated_vectors++;
- else
- msix_vec = VP_MSIX_VQ_VECTOR;
- vqs[i] = vp_setup_vq(vdev, queue_idx++, vqi->callback,
- vqi->name, vqi->ctx, msix_vec);
+ vqs[i] = vp_find_one_vq_msix(vdev, queue_idx++, vqi->callback,
+ vqi->name, vqi->ctx,
+ &allocated_vectors);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto error_find;
}
-
- if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
- continue;
-
- /* allocate per-vq irq if available and necessary */
- snprintf(vp_dev->msix_names[msix_vec],
- sizeof *vp_dev->msix_names,
- "%s-%s",
- dev_name(&vp_dev->vdev.dev), vqi->name);
- err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
- vring_interrupt, 0,
- vp_dev->msix_names[msix_vec],
- vqs[i]);
- if (err) {
- vp_del_vq(vqs[i]);
- goto error_find;
- }
}
return 0;
--
2.45.2
* [PATCH virtio v2 02/13] virtio_pci: simplify vp_request_msix_vectors() call a bit
2024-07-10 6:35 [PATCH virtio v2 00/13] virtio_pci_modern: allow parallel admin queue commands execution Jiri Pirko
2024-07-10 6:35 ` [PATCH virtio v2 01/13] virtio_pci: push out single vq find code to vp_find_one_vq_msix() Jiri Pirko
@ 2024-07-10 6:35 ` Jiri Pirko
2024-07-10 6:35 ` [PATCH virtio v2 03/13] virtio_pci: pass vector policy enum to vp_find_vqs_msix() Jiri Pirko
` (10 subsequent siblings)
12 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-07-10 6:35 UTC (permalink / raw)
To: virtualization; +Cc: mst, jasowang, xuanzhuo, eperezma, parav, feliu, hengqi
From: Jiri Pirko <jiri@nvidia.com>
Always pass the desc arg as-is to vp_request_msix_vectors(). There,
rely on the per_vq_vectors arg and null desc in case it is false.
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
---
drivers/virtio/virtio_pci_common.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 277591a6ab96..cda0d35dd9e1 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -125,6 +125,9 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
GFP_KERNEL))
goto error;
+ if (!per_vq_vectors)
+ desc = NULL;
+
if (desc) {
flags |= PCI_IRQ_AFFINITY;
desc->pre_vectors++; /* virtio config vector */
@@ -349,8 +352,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
nvectors = 2;
}
- err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
- per_vq_vectors ? desc : NULL);
+ err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors, desc);
if (err)
goto error_find;
--
2.45.2
* [PATCH virtio v2 03/13] virtio_pci: pass vector policy enum to vp_find_vqs_msix()
2024-07-10 6:35 [PATCH virtio v2 00/13] virtio_pci_modern: allow parallel admin queue commands execution Jiri Pirko
2024-07-10 6:35 ` [PATCH virtio v2 01/13] virtio_pci: push out single vq find code to vp_find_one_vq_msix() Jiri Pirko
2024-07-10 6:35 ` [PATCH virtio v2 02/13] virtio_pci: simplify vp_request_msix_vectors() call a bit Jiri Pirko
@ 2024-07-10 6:35 ` Jiri Pirko
2024-07-10 6:35 ` [PATCH virtio v2 04/13] virtio_pci: pass vector policy enum to vp_find_one_vq_msix() Jiri Pirko
` (9 subsequent siblings)
12 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-07-10 6:35 UTC (permalink / raw)
To: virtualization; +Cc: mst, jasowang, xuanzhuo, eperezma, parav, feliu, hengqi
From: Jiri Pirko <jiri@nvidia.com>
In preparation for another irq allocation fallback, introduce a vector
policy enum and pass its values to vp_find_vqs_msix() instead of a bool
arg.
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
---
v1->v2:
- rebased on top of the find_vqs() arg conversion patchset
---
drivers/virtio/virtio_pci_common.c | 16 +++++++++++++---
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index cda0d35dd9e1..7fec2258d125 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -287,6 +287,11 @@ void vp_del_vqs(struct virtio_device *vdev)
vp_dev->vqs = NULL;
}
+enum vp_vq_vector_policy {
+ VP_VQ_VECTOR_POLICY_EACH,
+ VP_VQ_VECTOR_POLICY_SHARED,
+};
+
static struct virtqueue *vp_find_one_vq_msix(struct virtio_device *vdev,
int queue_idx,
vq_callback_t *callback,
@@ -328,17 +333,20 @@ static struct virtqueue *vp_find_one_vq_msix(struct virtio_device *vdev,
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
struct virtqueue_info vqs_info[],
- bool per_vq_vectors,
+ enum vp_vq_vector_policy vector_policy,
struct irq_affinity *desc)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtqueue_info *vqi;
int i, err, nvectors, allocated_vectors, queue_idx = 0;
+ bool per_vq_vectors;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
return -ENOMEM;
+ per_vq_vectors = vector_policy != VP_VQ_VECTOR_POLICY_SHARED;
+
if (per_vq_vectors) {
/* Best option: one for change interrupt, one per vq. */
nvectors = 1;
@@ -427,11 +435,13 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
int err;
/* Try MSI-X with one vector per queue. */
- err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info, true, desc);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
+ VP_VQ_VECTOR_POLICY_EACH, desc);
if (!err)
return 0;
/* Fallback: MSI-X with one vector for config, one shared for queues. */
- err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info, false, desc);
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
+ VP_VQ_VECTOR_POLICY_SHARED, desc);
if (!err)
return 0;
/* Is there an interrupt? If not give up. */
--
2.45.2
* [PATCH virtio v2 04/13] virtio_pci: pass vector policy enum to vp_find_one_vq_msix()
2024-07-10 6:35 [PATCH virtio v2 00/13] virtio_pci_modern: allow parallel admin queue commands execution Jiri Pirko
` (2 preceding siblings ...)
2024-07-10 6:35 ` [PATCH virtio v2 03/13] virtio_pci: pass vector policy enum to vp_find_vqs_msix() Jiri Pirko
@ 2024-07-10 6:35 ` Jiri Pirko
2024-07-10 6:35 ` [PATCH virtio v2 05/13] virtio_pci: introduce vector allocation fallback for slow path virtqueues Jiri Pirko
` (8 subsequent siblings)
12 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-07-10 6:35 UTC (permalink / raw)
To: virtualization; +Cc: mst, jasowang, xuanzhuo, eperezma, parav, feliu, hengqi
From: Jiri Pirko <jiri@nvidia.com>
Instead of accessing vp_dev->per_vq_vectors, pass the vector policy
enum as an argument to vp_find_one_vq_msix() in preparation for another
irq allocation policy.
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
---
v1->v2:
- new patch
---
drivers/virtio/virtio_pci_common.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 7fec2258d125..628f003db79b 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -292,11 +292,11 @@ enum vp_vq_vector_policy {
VP_VQ_VECTOR_POLICY_SHARED,
};
-static struct virtqueue *vp_find_one_vq_msix(struct virtio_device *vdev,
- int queue_idx,
- vq_callback_t *callback,
- const char *name, bool ctx,
- int *allocated_vectors)
+static struct virtqueue *
+vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
+ vq_callback_t *callback, const char *name, bool ctx,
+ int *allocated_vectors,
+ enum vp_vq_vector_policy vector_policy)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtqueue *vq;
@@ -305,7 +305,7 @@ static struct virtqueue *vp_find_one_vq_msix(struct virtio_device *vdev,
if (!callback)
msix_vec = VIRTIO_MSI_NO_VECTOR;
- else if (vp_dev->per_vq_vectors)
+ else if (vector_policy == VP_VQ_VECTOR_POLICY_EACH)
msix_vec = (*allocated_vectors)++;
else
msix_vec = VP_MSIX_VQ_VECTOR;
@@ -313,7 +313,8 @@ static struct virtqueue *vp_find_one_vq_msix(struct virtio_device *vdev,
if (IS_ERR(vq))
return vq;
- if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
+ if (vector_policy == VP_VQ_VECTOR_POLICY_SHARED ||
+ msix_vec == VIRTIO_MSI_NO_VECTOR)
return vq;
/* allocate per-vq irq if available and necessary */
@@ -374,7 +375,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
}
vqs[i] = vp_find_one_vq_msix(vdev, queue_idx++, vqi->callback,
vqi->name, vqi->ctx,
- &allocated_vectors);
+ &allocated_vectors, vector_policy);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto error_find;
--
2.45.2
* [PATCH virtio v2 05/13] virtio_pci: introduce vector allocation fallback for slow path virtqueues
2024-07-10 6:35 [PATCH virtio v2 00/13] virtio_pci_modern: allow parallel admin queue commands execution Jiri Pirko
` (3 preceding siblings ...)
2024-07-10 6:35 ` [PATCH virtio v2 04/13] virtio_pci: pass vector policy enum to vp_find_one_vq_msix() Jiri Pirko
@ 2024-07-10 6:35 ` Jiri Pirko
2024-07-10 6:35 ` [PATCH virtio v2 06/13] virtio_pci_modern: treat vp_dev->admin_vq.info.vq pointer as static Jiri Pirko
` (7 subsequent siblings)
12 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-07-10 6:35 UTC (permalink / raw)
To: virtualization; +Cc: mst, jasowang, xuanzhuo, eperezma, parav, feliu, hengqi
From: Jiri Pirko <jiri@nvidia.com>
If there are not enough vectors to satisfy all virtqueues, the current
fallback is to use one vector for all virtqueues.
That may be unnecessary in some cases, when there are enough vectors
for the data queues.
Introduce another fallback policy that tries to allocate a vector for
each data queue, while the slow path queues (control/admin) share the
config vector.
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
---
v1->v2:
- new patch
---
drivers/virtio/virtio_pci_common.c | 53 ++++++++++++++++++++++++++----
drivers/virtio/virtio_pci_common.h | 7 ++--
2 files changed, 51 insertions(+), 9 deletions(-)
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 628f003db79b..cb8bbe6c1a9f 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -46,12 +46,26 @@ bool vp_notify(struct virtqueue *vq)
return true;
}
+/* Notify all slow path virtqueues on an interrupt. */
+static void vp_vring_slow_path_interrupt(int irq,
+ struct virtio_pci_device *vp_dev)
+{
+ struct virtio_pci_vq_info *info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp_dev->lock, flags);
+ list_for_each_entry(info, &vp_dev->slow_virtqueues, node)
+ vring_interrupt(irq, info->vq);
+ spin_unlock_irqrestore(&vp_dev->lock, flags);
+}
+
/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
struct virtio_pci_device *vp_dev = opaque;
virtio_config_changed(&vp_dev->vdev);
+ vp_vring_slow_path_interrupt(irq, vp_dev);
return IRQ_HANDLED;
}
@@ -174,6 +188,11 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
return err;
}
+static bool vp_is_slow_path_vector(u16 msix_vec)
+{
+ return msix_vec == VP_MSIX_CONFIG_VECTOR;
+}
+
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name,
@@ -197,7 +216,10 @@ static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int in
info->vq = vq;
if (callback) {
spin_lock_irqsave(&vp_dev->lock, flags);
- list_add(&info->node, &vp_dev->virtqueues);
+ if (!vp_is_slow_path_vector(msix_vec))
+ list_add(&info->node, &vp_dev->virtqueues);
+ else
+ list_add(&info->node, &vp_dev->slow_virtqueues);
spin_unlock_irqrestore(&vp_dev->lock, flags);
} else {
INIT_LIST_HEAD(&info->node);
@@ -245,7 +267,8 @@ void vp_del_vqs(struct virtio_device *vdev)
if (vp_dev->per_vq_vectors) {
int v = vp_dev->vqs[vq->index]->msix_vector;
- if (v != VIRTIO_MSI_NO_VECTOR) {
+ if (v != VIRTIO_MSI_NO_VECTOR &&
+ !vp_is_slow_path_vector(v)) {
int irq = pci_irq_vector(vp_dev->pci_dev, v);
irq_update_affinity_hint(irq, NULL);
@@ -289,13 +312,14 @@ void vp_del_vqs(struct virtio_device *vdev)
enum vp_vq_vector_policy {
VP_VQ_VECTOR_POLICY_EACH,
+ VP_VQ_VECTOR_POLICY_SHARED_SLOW,
VP_VQ_VECTOR_POLICY_SHARED,
};
static struct virtqueue *
vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
vq_callback_t *callback, const char *name, bool ctx,
- int *allocated_vectors,
+ bool slow_path, int *allocated_vectors,
enum vp_vq_vector_policy vector_policy)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -305,8 +329,13 @@ vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
if (!callback)
msix_vec = VIRTIO_MSI_NO_VECTOR;
- else if (vector_policy == VP_VQ_VECTOR_POLICY_EACH)
+ else if (vector_policy == VP_VQ_VECTOR_POLICY_EACH ||
+ (vector_policy == VP_VQ_VECTOR_POLICY_SHARED_SLOW &&
+ !slow_path))
msix_vec = (*allocated_vectors)++;
+ else if (vector_policy != VP_VQ_VECTOR_POLICY_EACH &&
+ slow_path)
+ msix_vec = VP_MSIX_CONFIG_VECTOR;
else
msix_vec = VP_MSIX_VQ_VECTOR;
vq = vp_setup_vq(vdev, queue_idx, callback, name, ctx, msix_vec);
@@ -314,7 +343,8 @@ vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
return vq;
if (vector_policy == VP_VQ_VECTOR_POLICY_SHARED ||
- msix_vec == VIRTIO_MSI_NO_VECTOR)
+ msix_vec == VIRTIO_MSI_NO_VECTOR ||
+ vp_is_slow_path_vector(msix_vec))
return vq;
/* allocate per-vq irq if available and necessary */
@@ -374,7 +404,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
continue;
}
vqs[i] = vp_find_one_vq_msix(vdev, queue_idx++, vqi->callback,
- vqi->name, vqi->ctx,
+ vqi->name, vqi->ctx, false,
&allocated_vectors, vector_policy);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
@@ -440,6 +470,13 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
VP_VQ_VECTOR_POLICY_EACH, desc);
if (!err)
return 0;
+ /* Fallback: MSI-X with one shared vector for config and
+ * slow path queues, one vector per queue for the rest.
+ */
+ err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
+ VP_VQ_VECTOR_POLICY_SHARED_SLOW, desc);
+ if (!err)
+ return 0;
/* Fallback: MSI-X with one vector for config, one shared for queues. */
err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
VP_VQ_VECTOR_POLICY_SHARED, desc);
@@ -493,7 +530,8 @@ const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
if (!vp_dev->per_vq_vectors ||
- vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
+ vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR ||
+ vp_is_slow_path_vector(vp_dev->vqs[index]->msix_vector))
return NULL;
return pci_irq_get_affinity(vp_dev->pci_dev,
@@ -601,6 +639,7 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
vp_dev->vdev.dev.release = virtio_pci_release_dev;
vp_dev->pci_dev = pci_dev;
INIT_LIST_HEAD(&vp_dev->virtqueues);
+ INIT_LIST_HEAD(&vp_dev->slow_virtqueues);
spin_lock_init(&vp_dev->lock);
/* enable the device */
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 3c4bb2d6163a..0cb6fdc8c354 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -35,7 +35,7 @@ struct virtio_pci_vq_info {
/* the actual virtqueue */
struct virtqueue *vq;
- /* the list node for the virtqueues list */
+ /* the list node for the virtqueues or slow_virtqueues list */
struct list_head node;
/* MSI-X vector (or none) */
@@ -66,9 +66,12 @@ struct virtio_pci_device {
/* Where to read and clear interrupt */
u8 __iomem *isr;
- /* a list of queues so we can dispatch IRQs */
+ /* Lists of queues and potentially slow path queues
+ * so we can dispatch IRQs.
+ */
spinlock_t lock;
struct list_head virtqueues;
+ struct list_head slow_virtqueues;
/* Array of all virtqueues reported in the
* PCI common config num_queues field
--
2.45.2
* [PATCH virtio v2 06/13] virtio_pci_modern: treat vp_dev->admin_vq.info.vq pointer as static
2024-07-10 6:35 [PATCH virtio v2 00/13] virtio_pci_modern: allow parallel admin queue commands execution Jiri Pirko
` (4 preceding siblings ...)
2024-07-10 6:35 ` [PATCH virtio v2 05/13] virtio_pci: introduce vector allocation fallback for slow path virtqueues Jiri Pirko
@ 2024-07-10 6:35 ` Jiri Pirko
2024-07-10 6:35 ` [PATCH virtio v2 07/13] virtio: push out code to vp_avq_index() Jiri Pirko
` (6 subsequent siblings)
12 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-07-10 6:35 UTC (permalink / raw)
To: virtualization; +Cc: mst, jasowang, xuanzhuo, eperezma, parav, feliu, hengqi
From: Jiri Pirko <jiri@nvidia.com>
It is guaranteed by the virtio_pci and PCI layers that none of the VFs
is probed before setup_vq() is called for the admin queue or after
del_vq() is called for it. Therefore treat vp_dev->admin_vq.info.vq as
static: don't null it and don't take the cmd lock during assignment.
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
---
drivers/virtio/virtio_pci_common.h | 2 +-
drivers/virtio/virtio_pci_modern.c | 11 +----------
2 files changed, 2 insertions(+), 11 deletions(-)
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 0cb6fdc8c354..aa43875b6353 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -45,7 +45,7 @@ struct virtio_pci_vq_info {
struct virtio_pci_admin_vq {
/* Virtqueue info associated with this admin queue. */
struct virtio_pci_vq_info info;
- /* serializing admin commands execution and virtqueue deletion */
+ /* serializing admin commands execution. */
struct mutex cmd_lock;
u64 supported_cmds;
/* Name of the admin queue: avq.$vq_index. */
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 3b5b9499a53a..6d653bea9a87 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -580,11 +580,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
goto err;
}
- if (is_avq) {
- mutex_lock(&vp_dev->admin_vq.cmd_lock);
+ if (is_avq)
vp_dev->admin_vq.info.vq = vq;
- mutex_unlock(&vp_dev->admin_vq.cmd_lock);
- }
return vq;
@@ -620,12 +617,6 @@ static void del_vq(struct virtio_pci_vq_info *info)
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
- if (vp_is_avq(&vp_dev->vdev, vq->index)) {
- mutex_lock(&vp_dev->admin_vq.cmd_lock);
- vp_dev->admin_vq.info.vq = NULL;
- mutex_unlock(&vp_dev->admin_vq.cmd_lock);
- }
-
if (vp_dev->msix_enabled)
vp_modern_queue_vector(mdev, vq->index,
VIRTIO_MSI_NO_VECTOR);
--
2.45.2
* [PATCH virtio v2 07/13] virtio: push out code to vp_avq_index()
2024-07-10 6:35 [PATCH virtio v2 00/13] virtio_pci_modern: allow parallel admin queue commands execution Jiri Pirko
` (5 preceding siblings ...)
2024-07-10 6:35 ` [PATCH virtio v2 06/13] virtio_pci_modern: treat vp_dev->admin_vq.info.vq pointer as static Jiri Pirko
@ 2024-07-10 6:35 ` Jiri Pirko
2024-07-10 6:35 ` [PATCH virtio v2 08/13] virtio_pci: pass vq info as an argument to vp_setup_vq() Jiri Pirko
` (5 subsequent siblings)
12 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-07-10 6:35 UTC (permalink / raw)
To: virtualization; +Cc: mst, jasowang, xuanzhuo, eperezma, parav, feliu, hengqi
From: Jiri Pirko <jiri@nvidia.com>
To prepare for a follow-up patch that uses this code as an op, push out
the code that gets the admin virtqueue base index and count into a
separate helper.
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
---
drivers/virtio/virtio_pci_modern.c | 31 ++++++++++++++++++++----------
1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 6d653bea9a87..d704947b1aec 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -28,6 +28,21 @@ static u64 vp_get_features(struct virtio_device *vdev)
return vp_modern_get_features(&vp_dev->mdev);
}
+static int vp_avq_index(struct virtio_device *vdev, u16 *index, u16 *num)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ *num = 0;
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return 0;
+
+ *num = vp_modern_avq_num(&vp_dev->mdev);
+ if (!(*num))
+ return -EINVAL;
+ *index = vp_modern_avq_index(&vp_dev->mdev);
+ return 0;
+}
+
static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -729,19 +744,15 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
static int vp_modern_create_avq(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_admin_vq *avq;
+ struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
struct virtqueue *vq;
- u16 admin_q_num;
-
- if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
- return 0;
+ u16 num;
+ int err;
- admin_q_num = vp_modern_avq_num(&vp_dev->mdev);
- if (!admin_q_num)
- return -EINVAL;
+ err = vp_avq_index(vdev, &avq->vq_index, &num);
+ if (err || !num)
+ return err;
- avq = &vp_dev->admin_vq;
- avq->vq_index = vp_modern_avq_index(&vp_dev->mdev);
sprintf(avq->name, "avq.%u", avq->vq_index);
vq = vp_dev->setup_vq(vp_dev, &vp_dev->admin_vq.info, avq->vq_index, NULL,
avq->name, NULL, VIRTIO_MSI_NO_VECTOR);
--
2.45.2
* [PATCH virtio v2 08/13] virtio_pci: pass vq info as an argument to vp_setup_vq()
2024-07-10 6:35 [PATCH virtio v2 00/13] virtio_pci_modern: allow parallel admin queue commands execution Jiri Pirko
` (6 preceding siblings ...)
2024-07-10 6:35 ` [PATCH virtio v2 07/13] virtio: push out code to vp_avq_index() Jiri Pirko
@ 2024-07-10 6:35 ` Jiri Pirko
2024-07-10 6:35 ` [PATCH virtio v2 09/13] virtio: create admin queues alongside other virtqueues Jiri Pirko
` (4 subsequent siblings)
12 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-07-10 6:35 UTC (permalink / raw)
To: virtualization; +Cc: mst, jasowang, xuanzhuo, eperezma, parav, feliu, hengqi
From: Jiri Pirko <jiri@nvidia.com>
Instead of vp_setup_vq() storing the vq info directly in vp_dev->vqs,
let the caller provide a pointer to store the info to. This prepares
vp_setup_vq() to be able to store admin queue info as well.
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
---
v1->v2:
- new patch
---
drivers/virtio/virtio_pci_common.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index cb8bbe6c1a9f..7c5516ae1f8a 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -197,7 +197,8 @@ static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int in
void (*callback)(struct virtqueue *vq),
const char *name,
bool ctx,
- u16 msix_vec)
+ u16 msix_vec,
+ struct virtio_pci_vq_info **p_info)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
@@ -225,7 +226,7 @@ static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int in
INIT_LIST_HEAD(&info->node);
}
- vp_dev->vqs[index] = info;
+ *p_info = info;
return vq;
out_info:
@@ -320,7 +321,8 @@ static struct virtqueue *
vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
vq_callback_t *callback, const char *name, bool ctx,
bool slow_path, int *allocated_vectors,
- enum vp_vq_vector_policy vector_policy)
+ enum vp_vq_vector_policy vector_policy,
+ struct virtio_pci_vq_info **p_info)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
struct virtqueue *vq;
@@ -338,7 +340,8 @@ vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
msix_vec = VP_MSIX_CONFIG_VECTOR;
else
msix_vec = VP_MSIX_VQ_VECTOR;
- vq = vp_setup_vq(vdev, queue_idx, callback, name, ctx, msix_vec);
+ vq = vp_setup_vq(vdev, queue_idx, callback, name, ctx, msix_vec,
+ p_info);
if (IS_ERR(vq))
return vq;
@@ -405,7 +408,8 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
}
vqs[i] = vp_find_one_vq_msix(vdev, queue_idx++, vqi->callback,
vqi->name, vqi->ctx, false,
- &allocated_vectors, vector_policy);
+ &allocated_vectors, vector_policy,
+ &vp_dev->vqs[i]);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto error_find;
@@ -445,7 +449,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
}
vqs[i] = vp_setup_vq(vdev, queue_idx++, vqi->callback,
vqi->name, vqi->ctx,
- VIRTIO_MSI_NO_VECTOR);
+ VIRTIO_MSI_NO_VECTOR, &vp_dev->vqs[i]);
if (IS_ERR(vqs[i])) {
err = PTR_ERR(vqs[i]);
goto out_del_vqs;
--
2.45.2
* [PATCH virtio v2 09/13] virtio: create admin queues alongside other virtqueues
2024-07-10 6:35 [PATCH virtio v2 00/13] virtio_pci_modern: allow parallel admin queue commands execution Jiri Pirko
` (7 preceding siblings ...)
2024-07-10 6:35 ` [PATCH virtio v2 08/13] virtio_pci: pass vq info as an argument to vp_setup_vq() Jiri Pirko
@ 2024-07-10 6:35 ` Jiri Pirko
2024-07-10 6:35 ` [PATCH virtio v2 10/13] virtio_pci_modern: create admin queue of queried size Jiri Pirko
` (3 subsequent siblings)
12 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-07-10 6:35 UTC (permalink / raw)
To: virtualization; +Cc: mst, jasowang, xuanzhuo, eperezma, parav, feliu, hengqi
From: Jiri Pirko <jiri@nvidia.com>
The admin virtqueue is just another virtqueue; there is nothing that
special about it. The current implementation nevertheless treats it
separately in terms of creation and deletion. Unify the admin virtqueue
creation and deletion flows with the rest of the virtqueues, creating
it from the vp_find_vqs_*() helpers and letting it be deleted by
vp_del_vqs() like the rest.
Call vp_find_one_vq_msix() with the slow_path argument set to "true" to
make sure that, in case of limited interrupt vectors, the config vector
is used for the admin queue.
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
---
v1->v2:
- rebased vp_find_one_vq_msix() code on top of newly added patches
- extended patch description a bit
- store vq info pointer in struct virtio_pci_admin_vq, as the admin
  queue index does not have to be last+1, so the vp_dev->vqs array is
  not really suitable.
- let virtqueue_exec_admin_cmd() use the info to get the vq
- don't store admin vq in vqs array
---
drivers/virtio/virtio.c | 28 +------------
drivers/virtio/virtio_pci_common.c | 43 ++++++++++++++++++--
drivers/virtio/virtio_pci_common.h | 4 +-
drivers/virtio/virtio_pci_modern.c | 63 +-----------------------------
include/linux/virtio_config.h | 2 -
5 files changed, 46 insertions(+), 94 deletions(-)
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 396d3cd49a1b..835cfbcb59c8 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -305,15 +305,9 @@ static int virtio_dev_probe(struct device *_d)
if (err)
goto err;
- if (dev->config->create_avq) {
- err = dev->config->create_avq(dev);
- if (err)
- goto err;
- }
-
err = drv->probe(dev);
if (err)
- goto err_probe;
+ goto err;
/* If probe didn't do it, mark device DRIVER_OK ourselves. */
if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
@@ -326,9 +320,6 @@ static int virtio_dev_probe(struct device *_d)
return 0;
-err_probe:
- if (dev->config->destroy_avq)
- dev->config->destroy_avq(dev);
err:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return err;
@@ -344,9 +335,6 @@ static void virtio_dev_remove(struct device *_d)
drv->remove(dev);
- if (dev->config->destroy_avq)
- dev->config->destroy_avq(dev);
-
/* Driver should have reset device. */
WARN_ON_ONCE(dev->config->get_status(dev));
@@ -524,9 +512,6 @@ int virtio_device_freeze(struct virtio_device *dev)
}
}
- if (dev->config->destroy_avq)
- dev->config->destroy_avq(dev);
-
return 0;
}
EXPORT_SYMBOL_GPL(virtio_device_freeze);
@@ -562,16 +547,10 @@ int virtio_device_restore(struct virtio_device *dev)
if (ret)
goto err;
- if (dev->config->create_avq) {
- ret = dev->config->create_avq(dev);
- if (ret)
- goto err;
- }
-
if (drv->restore) {
ret = drv->restore(dev);
if (ret)
- goto err_restore;
+ goto err;
}
/* If restore didn't do it, mark device DRIVER_OK ourselves. */
@@ -582,9 +561,6 @@ int virtio_device_restore(struct virtio_device *dev)
return 0;
-err_restore:
- if (dev->config->destroy_avq)
- dev->config->destroy_avq(dev);
err:
virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
return ret;
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 7c5516ae1f8a..267643bb1cd5 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -262,9 +262,6 @@ void vp_del_vqs(struct virtio_device *vdev)
int i;
list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
- if (vp_dev->is_avq && vp_dev->is_avq(vdev, vq->index))
- continue;
-
if (vp_dev->per_vq_vectors) {
int v = vp_dev->vqs[vq->index]->msix_vector;
@@ -371,14 +368,23 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
struct irq_affinity *desc)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
struct virtqueue_info *vqi;
int i, err, nvectors, allocated_vectors, queue_idx = 0;
+ struct virtqueue *vq;
bool per_vq_vectors;
+ u16 avq_num = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
return -ENOMEM;
+ if (vp_dev->avq_index) {
+ err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num);
+ if (err)
+ goto error_find;
+ }
+
per_vq_vectors = vector_policy != VP_VQ_VECTOR_POLICY_SHARED;
if (per_vq_vectors) {
@@ -415,6 +421,18 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
goto error_find;
}
}
+
+ if (!avq_num)
+ return 0;
+ sprintf(avq->name, "avq.%u", avq->vq_index);
+ vq = vp_find_one_vq_msix(vdev, avq->vq_index, NULL, avq->name, false,
+ true, &allocated_vectors, vector_policy,
+ &vp_dev->admin_vq.info);
+ if (IS_ERR(vq)) {
+ err = PTR_ERR(vq);
+ goto error_find;
+ }
+
return 0;
error_find:
@@ -427,12 +445,21 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue_info vqs_info[])
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
int i, err, queue_idx = 0;
+ struct virtqueue *vq;
+ u16 avq_num = 0;
vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
if (!vp_dev->vqs)
return -ENOMEM;
+ if (vp_dev->avq_index) {
+ err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num);
+ if (err)
+ goto out_del_vqs;
+ }
+
err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
dev_name(&vdev->dev), vp_dev);
if (err)
@@ -456,6 +483,16 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
}
}
+ if (!avq_num)
+ return 0;
+ sprintf(avq->name, "avq.%u", avq->vq_index);
+ vq = vp_setup_vq(vdev, queue_idx++, NULL, avq->name, false,
+ VIRTIO_MSI_NO_VECTOR, &vp_dev->admin_vq.info);
+ if (IS_ERR(vq)) {
+ err = PTR_ERR(vq);
+ goto out_del_vqs;
+ }
+
return 0;
out_del_vqs:
vp_del_vqs(vdev);
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index aa43875b6353..de59bb06ec3c 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -44,7 +44,7 @@ struct virtio_pci_vq_info {
struct virtio_pci_admin_vq {
/* Virtqueue info associated with this admin queue. */
- struct virtio_pci_vq_info info;
+ struct virtio_pci_vq_info *info;
/* serializing admin commands execution. */
struct mutex cmd_lock;
u64 supported_cmds;
@@ -105,7 +105,7 @@ struct virtio_pci_device {
void (*del_vq)(struct virtio_pci_vq_info *info);
u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
- bool (*is_avq)(struct virtio_device *vdev, unsigned int index);
+ int (*avq_index)(struct virtio_device *vdev, u16 *index, u16 *num);
};
/* Constants for MSI-X */
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index d704947b1aec..5ceb4b2c18df 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -63,7 +63,7 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
struct virtqueue *vq;
int ret, len;
- vq = admin_vq->info.vq;
+ vq = admin_vq->info->vq;
if (!vq)
return -EIO;
@@ -203,27 +203,12 @@ static void virtio_pci_admin_cmd_list_init(struct virtio_device *virtio_dev)
static void vp_modern_avq_activate(struct virtio_device *vdev)
{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
-
if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
return;
- __virtqueue_unbreak(admin_vq->info.vq);
virtio_pci_admin_cmd_list_init(vdev);
}
-static void vp_modern_avq_deactivate(struct virtio_device *vdev)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
-
- if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
- return;
-
- __virtqueue_break(admin_vq->info.vq);
-}
-
static void vp_transport_features(struct virtio_device *vdev, u64 features)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -418,8 +403,6 @@ static void vp_reset(struct virtio_device *vdev)
while (vp_modern_get_status(mdev))
msleep(1);
- vp_modern_avq_deactivate(vdev);
-
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
}
@@ -595,9 +578,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
goto err;
}
- if (is_avq)
- vp_dev->admin_vq.info.vq = vq;
-
return vq;
err:
@@ -741,41 +721,6 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
return true;
}
-static int vp_modern_create_avq(struct virtio_device *vdev)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
- struct virtqueue *vq;
- u16 num;
- int err;
-
- err = vp_avq_index(vdev, &avq->vq_index, &num);
- if (err || !num)
- return err;
-
- sprintf(avq->name, "avq.%u", avq->vq_index);
- vq = vp_dev->setup_vq(vp_dev, &vp_dev->admin_vq.info, avq->vq_index, NULL,
- avq->name, NULL, VIRTIO_MSI_NO_VECTOR);
- if (IS_ERR(vq)) {
- dev_err(&vdev->dev, "failed to setup admin virtqueue, err=%ld",
- PTR_ERR(vq));
- return PTR_ERR(vq);
- }
-
- vp_modern_set_queue_enable(&vp_dev->mdev, avq->info.vq->index, true);
- return 0;
-}
-
-static void vp_modern_destroy_avq(struct virtio_device *vdev)
-{
- struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-
- if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
- return;
-
- vp_dev->del_vq(&vp_dev->admin_vq.info);
-}
-
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.get = NULL,
.set = NULL,
@@ -794,8 +739,6 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.get_shm_region = vp_get_shm_region,
.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
- .create_avq = vp_modern_create_avq,
- .destroy_avq = vp_modern_destroy_avq,
};
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -816,8 +759,6 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.get_shm_region = vp_get_shm_region,
.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
- .create_avq = vp_modern_create_avq,
- .destroy_avq = vp_modern_destroy_avq,
};
/* the PCI probing function */
@@ -841,7 +782,7 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
vp_dev->config_vector = vp_config_vector;
vp_dev->setup_vq = setup_vq;
vp_dev->del_vq = del_vq;
- vp_dev->is_avq = vp_is_avq;
+ vp_dev->avq_index = vp_avq_index;
vp_dev->isr = mdev->isr;
vp_dev->vdev.id = mdev->id;
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index ab4b9a3fef6b..b9dc22ee0ed0 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -133,8 +133,6 @@ struct virtio_config_ops {
struct virtio_shm_region *region, u8 id);
int (*disable_vq_and_reset)(struct virtqueue *vq);
int (*enable_vq_after_reset)(struct virtqueue *vq);
- int (*create_avq)(struct virtio_device *vdev);
- void (*destroy_avq)(struct virtio_device *vdev);
};
/* If driver didn't advertise the feature, it will never appear. */
--
2.45.2
* [PATCH virtio v2 10/13] virtio_pci_modern: create admin queue of queried size
2024-07-10 6:35 [PATCH virtio v2 00/13] virtio_pci_modern: allow parallel admin queue commands execution Jiri Pirko
` (8 preceding siblings ...)
2024-07-10 6:35 ` [PATCH virtio v2 09/13] virtio: create admin queues alongside other virtqueues Jiri Pirko
@ 2024-07-10 6:35 ` Jiri Pirko
2024-07-14 7:55 ` Michael S. Tsirkin
2024-07-10 6:35 ` [PATCH virtio v2 11/13] virtio_pci_modern: pass cmd as an identification token Jiri Pirko
` (2 subsequent siblings)
12 siblings, 1 reply; 24+ messages in thread
From: Jiri Pirko @ 2024-07-10 6:35 UTC (permalink / raw)
To: virtualization; +Cc: mst, jasowang, xuanzhuo, eperezma, parav, feliu, hengqi
From: Jiri Pirko <jiri@nvidia.com>
Don't limit the admin queue size to VIRTIO_AVQ_SGS_MAX and rather rely
on the queried queue size.
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
---
drivers/virtio/virtio_pci_modern.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 5ceb4b2c18df..a649c9dc436d 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -550,8 +550,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
return ERR_PTR(-EINVAL);
- num = is_avq ?
- VIRTIO_AVQ_SGS_MAX : vp_modern_get_queue_size(mdev, index);
+ num = vp_modern_get_queue_size(mdev, index);
/* Check if queue is either not available or already active. */
if (!num || vp_modern_get_queue_enable(mdev, index))
return ERR_PTR(-ENOENT);
--
2.45.2
* Re: [PATCH virtio v2 10/13] virtio_pci_modern: create admin queue of queried size
2024-07-10 6:35 ` [PATCH virtio v2 10/13] virtio_pci_modern: create admin queue of queried size Jiri Pirko
@ 2024-07-14 7:55 ` Michael S. Tsirkin
2024-07-14 14:28 ` Parav Pandit
0 siblings, 1 reply; 24+ messages in thread
From: Michael S. Tsirkin @ 2024-07-14 7:55 UTC (permalink / raw)
To: Jiri Pirko
Cc: virtualization, jasowang, xuanzhuo, eperezma, parav, feliu,
hengqi
On Wed, Jul 10, 2024 at 08:35:58AM +0200, Jiri Pirko wrote:
> From: Jiri Pirko <jiri@nvidia.com>
>
> Don't limit the admin queue size to VIRTIO_AVQ_SGS_MAX and rather rely
> on the queried queue size.
>
> Signed-off-by: Jiri Pirko <jiri@nvidia.com>
I have some doubts about this one. Can't we limit the size to something
sensible? E.g. max number of CPUs? Number of VFs? I don't see why we
should just follow what device did blindly, the device has no idea about
use so would tend to over-provision.
> ---
> drivers/virtio/virtio_pci_modern.c | 3 +--
> 1 file changed, 1 insertion(+), 2 deletions(-)
>
> diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
> index 5ceb4b2c18df..a649c9dc436d 100644
> --- a/drivers/virtio/virtio_pci_modern.c
> +++ b/drivers/virtio/virtio_pci_modern.c
> @@ -550,8 +550,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
> if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
> return ERR_PTR(-EINVAL);
>
> - num = is_avq ?
> - VIRTIO_AVQ_SGS_MAX : vp_modern_get_queue_size(mdev, index);
> + num = vp_modern_get_queue_size(mdev, index);
> /* Check if queue is either not available or already active. */
> if (!num || vp_modern_get_queue_enable(mdev, index))
> return ERR_PTR(-ENOENT);
> --
> 2.45.2
* RE: [PATCH virtio v2 10/13] virtio_pci_modern: create admin queue of queried size
2024-07-14 7:55 ` Michael S. Tsirkin
@ 2024-07-14 14:28 ` Parav Pandit
2024-07-15 7:57 ` Jiri Pirko
0 siblings, 1 reply; 24+ messages in thread
From: Parav Pandit @ 2024-07-14 14:28 UTC (permalink / raw)
To: Michael S. Tsirkin, Jiri Pirko
Cc: virtualization@lists.linux.dev, jasowang@redhat.com,
xuanzhuo@linux.alibaba.com, eperezma@redhat.com, Feng Liu,
hengqi@linux.alibaba.com
> From: Michael S. Tsirkin <mst@redhat.com>
> Sent: Sunday, July 14, 2024 1:25 PM
>
> On Wed, Jul 10, 2024 at 08:35:58AM +0200, Jiri Pirko wrote:
> > From: Jiri Pirko <jiri@nvidia.com>
> >
> > Don't limit the admin queue size to VIRTIO_AVQ_SGS_MAX and rather rely
> > on the queried queue size.
> >
> > Signed-off-by: Jiri Pirko <jiri@nvidia.com>
>
> I have some doubts about this one. Can't we limit the size to something
> sensible? E.g. max number of CPUs? Number of VFs? I don't see why we
> should just follow what device did blindly, the device has no idea about use
> so would tend to over-provision.
>
+1.
I agree with Michael, we can possibly do min(max_cpus, device_supplied_limit).
When more use cases of it arise, this can be improved in future to use larger limit.
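Just to sketch the idea in code (a rough, untested illustration against
the setup_vq() context quoted below; num_possible_cpus() is only one
possible cap, the exact limit is open for discussion):
	num = vp_modern_get_queue_size(mdev, index);
	/* hypothetical clamp: don't take the device-reported admin queue
	 * size blindly, cap it by some notion of host scale
	 */
	if (is_avq)
		num = min_t(u16, num, num_possible_cpus());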
> > ---
> > drivers/virtio/virtio_pci_modern.c | 3 +--
> > 1 file changed, 1 insertion(+), 2 deletions(-)
> >
> > diff --git a/drivers/virtio/virtio_pci_modern.c
> > b/drivers/virtio/virtio_pci_modern.c
> > index 5ceb4b2c18df..a649c9dc436d 100644
> > --- a/drivers/virtio/virtio_pci_modern.c
> > +++ b/drivers/virtio/virtio_pci_modern.c
> > @@ -550,8 +550,7 @@ static struct virtqueue *setup_vq(struct
> virtio_pci_device *vp_dev,
> > if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
> > return ERR_PTR(-EINVAL);
> >
> > - num = is_avq ?
> > - VIRTIO_AVQ_SGS_MAX : vp_modern_get_queue_size(mdev,
> index);
> > + num = vp_modern_get_queue_size(mdev, index);
> > /* Check if queue is either not available or already active. */
> > if (!num || vp_modern_get_queue_enable(mdev, index))
> > return ERR_PTR(-ENOENT);
> > --
> > 2.45.2
* Re: [PATCH virtio v2 10/13] virtio_pci_modern: create admin queue of queried size
2024-07-14 14:28 ` Parav Pandit
@ 2024-07-15 7:57 ` Jiri Pirko
2024-07-15 9:03 ` Michael S. Tsirkin
0 siblings, 1 reply; 24+ messages in thread
From: Jiri Pirko @ 2024-07-15 7:57 UTC (permalink / raw)
To: Parav Pandit
Cc: Michael S. Tsirkin, virtualization@lists.linux.dev,
jasowang@redhat.com, xuanzhuo@linux.alibaba.com,
eperezma@redhat.com, Feng Liu, hengqi@linux.alibaba.com
Sun, Jul 14, 2024 at 04:28:29PM CEST, parav@nvidia.com wrote:
>
>
>> From: Michael S. Tsirkin <mst@redhat.com>
>> Sent: Sunday, July 14, 2024 1:25 PM
>>
>> On Wed, Jul 10, 2024 at 08:35:58AM +0200, Jiri Pirko wrote:
>> > From: Jiri Pirko <jiri@nvidia.com>
>> >
>> > Don't limit the admin queue size to VIRTIO_AVQ_SGS_MAX and rather rely
>> > on the queried queue size.
>> >
>> > Signed-off-by: Jiri Pirko <jiri@nvidia.com>
>>
>> I have some doubts about this one. Can't we limit the size to something
>> sensible? E.g. max number of CPUs? Number of VFs? I don't see why we
>> should just follow what device did blindly, the device has no idea about use
>> so would tend to over-provision.
>>
>+1.
>I agree with Michael, we can possibly do min(max_cpus, device_supplied_limit).
>When more use cases of it arise, this can be improved in future to use larger limit.
Why max_cpus? How number of cpus is related here?
regarding max VFs is also value coming from the device,
why is that better?
For other queues, we also use number provided by the device. Why here
the behaviour would be different? Device should know what scale to
prepare for, why to be smart here?
>
>> > ---
>> > drivers/virtio/virtio_pci_modern.c | 3 +--
>> > 1 file changed, 1 insertion(+), 2 deletions(-)
>> >
>> > diff --git a/drivers/virtio/virtio_pci_modern.c
>> > b/drivers/virtio/virtio_pci_modern.c
>> > index 5ceb4b2c18df..a649c9dc436d 100644
>> > --- a/drivers/virtio/virtio_pci_modern.c
>> > +++ b/drivers/virtio/virtio_pci_modern.c
>> > @@ -550,8 +550,7 @@ static struct virtqueue *setup_vq(struct
>> virtio_pci_device *vp_dev,
>> > if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
>> > return ERR_PTR(-EINVAL);
>> >
>> > - num = is_avq ?
>> > - VIRTIO_AVQ_SGS_MAX : vp_modern_get_queue_size(mdev,
>> index);
>> > + num = vp_modern_get_queue_size(mdev, index);
>> > /* Check if queue is either not available or already active. */
>> > if (!num || vp_modern_get_queue_enable(mdev, index))
>> > return ERR_PTR(-ENOENT);
>> > --
>> > 2.45.2
>
* Re: [PATCH virtio v2 10/13] virtio_pci_modern: create admin queue of queried size
2024-07-15 7:57 ` Jiri Pirko
@ 2024-07-15 9:03 ` Michael S. Tsirkin
0 siblings, 0 replies; 24+ messages in thread
From: Michael S. Tsirkin @ 2024-07-15 9:03 UTC (permalink / raw)
To: Jiri Pirko
Cc: Parav Pandit, virtualization@lists.linux.dev, jasowang@redhat.com,
xuanzhuo@linux.alibaba.com, eperezma@redhat.com, Feng Liu,
hengqi@linux.alibaba.com
On Mon, Jul 15, 2024 at 09:57:31AM +0200, Jiri Pirko wrote:
> Sun, Jul 14, 2024 at 04:28:29PM CEST, parav@nvidia.com wrote:
> >
> >
> >> From: Michael S. Tsirkin <mst@redhat.com>
> >> Sent: Sunday, July 14, 2024 1:25 PM
> >>
> >> On Wed, Jul 10, 2024 at 08:35:58AM +0200, Jiri Pirko wrote:
> >> > From: Jiri Pirko <jiri@nvidia.com>
> >> >
> >> > Don't limit the admin queue size to VIRTIO_AVQ_SGS_MAX and rather rely
> >> > on the queried queue size.
> >> >
> >> > Signed-off-by: Jiri Pirko <jiri@nvidia.com>
> >>
> >> I have some doubts about this one. Can't we limit the size to something
> >> sensible? E.g. max number of CPUs? Number of VFs? I don't see why we
> >> should just follow what device did blindly, the device has no idea about use
> >> so would tend to over-provision.
> >>
> >+1.
> >I agree with Michael, we can possibly do min(max_cpus, device_supplied_limit).
> >When more use cases of it arise, this can be improved in future to use larger limit.
>
> Why max_cpus? How number of cpus is related here?
>
> regarding max VFs is also value coming from the device,
> why is that better?
I mean the actual # of VFs enabled - that is provided by
software, right?
> For other queues, we also use number provided by the device. Why here
> the behaviour would be different? Device should know what scale to
> prepare for, why to be smart here?
How can a plug-in device know how it will be used?
For many queues, more is generally better.
But yes, it is quite possible we could do better than
just follow hardware, we just don't have a better idea yet.
>
>
> >
> >> > ---
> >> > drivers/virtio/virtio_pci_modern.c | 3 +--
> >> > 1 file changed, 1 insertion(+), 2 deletions(-)
> >> >
> >> > diff --git a/drivers/virtio/virtio_pci_modern.c
> >> > b/drivers/virtio/virtio_pci_modern.c
> >> > index 5ceb4b2c18df..a649c9dc436d 100644
> >> > --- a/drivers/virtio/virtio_pci_modern.c
> >> > +++ b/drivers/virtio/virtio_pci_modern.c
> >> > @@ -550,8 +550,7 @@ static struct virtqueue *setup_vq(struct
> >> virtio_pci_device *vp_dev,
> >> > if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
> >> > return ERR_PTR(-EINVAL);
> >> >
> >> > - num = is_avq ?
> >> > - VIRTIO_AVQ_SGS_MAX : vp_modern_get_queue_size(mdev,
> >> index);
> >> > + num = vp_modern_get_queue_size(mdev, index);
> >> > /* Check if queue is either not available or already active. */
> >> > if (!num || vp_modern_get_queue_enable(mdev, index))
> >> > return ERR_PTR(-ENOENT);
> >> > --
> >> > 2.45.2
> >
* [PATCH virtio v2 11/13] virtio_pci_modern: pass cmd as an identification token
2024-07-10 6:35 [PATCH virtio v2 00/13] virtio_pci_modern: allow parallel admin queue commands execution Jiri Pirko
` (9 preceding siblings ...)
2024-07-10 6:35 ` [PATCH virtio v2 10/13] virtio_pci_modern: create admin queue of queried size Jiri Pirko
@ 2024-07-10 6:35 ` Jiri Pirko
2024-07-10 6:36 ` [PATCH virtio v2 12/13] virtio_pci_modern: use completion instead of busy loop to wait on admin cmd result Jiri Pirko
2024-07-10 6:36 ` [PATCH virtio v2 13/13] virtio_pci_modern: remove admin queue serialization lock Jiri Pirko
12 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-07-10 6:35 UTC (permalink / raw)
To: virtualization; +Cc: mst, jasowang, xuanzhuo, eperezma, parav, feliu, hengqi
From: Jiri Pirko <jiri@nvidia.com>
In preparation for asynchronous admin queue processing, pass the cmd
pointer as the data arg to virtqueue_add_sgs().
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
---
drivers/virtio/virtio_pci_modern.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index a649c9dc436d..0fd344d1eaf9 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -58,7 +58,7 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
struct scatterlist **sgs,
unsigned int out_num,
unsigned int in_num,
- void *data)
+ struct virtio_admin_cmd *cmd)
{
struct virtqueue *vq;
int ret, len;
@@ -72,7 +72,7 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
!((1ULL << opcode) & admin_vq->supported_cmds))
return -EOPNOTSUPP;
- ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, data, GFP_KERNEL);
+ ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
if (ret < 0)
return -EIO;
@@ -140,7 +140,7 @@ int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
mutex_lock(&vp_dev->admin_vq.cmd_lock);
ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq,
le16_to_cpu(cmd->opcode),
- sgs, out_num, in_num, sgs);
+ sgs, out_num, in_num, cmd);
mutex_unlock(&vp_dev->admin_vq.cmd_lock);
if (ret) {
--
2.45.2
* [PATCH virtio v2 12/13] virtio_pci_modern: use completion instead of busy loop to wait on admin cmd result
2024-07-10 6:35 [PATCH virtio v2 00/13] virtio_pci_modern: allow parallel admin queue commands execution Jiri Pirko
` (10 preceding siblings ...)
2024-07-10 6:35 ` [PATCH virtio v2 11/13] virtio_pci_modern: pass cmd as an identification token Jiri Pirko
@ 2024-07-10 6:36 ` Jiri Pirko
2024-07-10 11:47 ` Michael S. Tsirkin
2024-07-10 6:36 ` [PATCH virtio v2 13/13] virtio_pci_modern: remove admin queue serialization lock Jiri Pirko
12 siblings, 1 reply; 24+ messages in thread
From: Jiri Pirko @ 2024-07-10 6:36 UTC (permalink / raw)
To: virtualization; +Cc: mst, jasowang, xuanzhuo, eperezma, parav, feliu, hengqi
From: Jiri Pirko <jiri@nvidia.com>
Currently, the code waits in a busy loop on every issued admin
virtqueue command to get a reply. That prevents callers from issuing
multiple commands in parallel.
To overcome this limitation, introduce a virtqueue event callback for
the admin virtqueue. For every issued command, use the completion
mechanism to wait on a reply. In the event callback, trigger the
completion for every incoming reply.
Alongside that, introduce a spin lock to protect the admin virtqueue
operations.
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
---
v1->v2:
- rebased on top of newly added patches
- rebased on top of changes in previous patches (vq info, vqs[])
- removed WARN_ON_ONCE() when calling virtqueue_kick()
- added virtqueue_is_broken check in virtqueue_exec_admin_cmd() loop
- added vp_modern_avq_cleanup() implementation to handle surprise
removal case
---
drivers/virtio/virtio_pci_common.c | 13 ++++--
drivers/virtio/virtio_pci_common.h | 3 ++
drivers/virtio/virtio_pci_modern.c | 74 +++++++++++++++++++++++++-----
include/linux/virtio.h | 3 ++
4 files changed, 77 insertions(+), 16 deletions(-)
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 267643bb1cd5..c44d8ba00c02 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -395,6 +395,8 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
if (vqi->name && vqi->callback)
++nvectors;
}
+ if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH)
+ ++nvectors;
} else {
/* Second best: one for change, shared for all vqs. */
nvectors = 2;
@@ -425,9 +427,9 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
if (!avq_num)
return 0;
sprintf(avq->name, "avq.%u", avq->vq_index);
- vq = vp_find_one_vq_msix(vdev, avq->vq_index, NULL, avq->name, false,
- true, &allocated_vectors, vector_policy,
- &vp_dev->admin_vq.info);
+ vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
+ avq->name, false, true, &allocated_vectors,
+ vector_policy, &vp_dev->admin_vq.info);
if (IS_ERR(vq)) {
err = PTR_ERR(vq);
goto error_find;
@@ -486,8 +488,9 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
if (!avq_num)
return 0;
sprintf(avq->name, "avq.%u", avq->vq_index);
- vq = vp_setup_vq(vdev, queue_idx++, NULL, avq->name, false,
- VIRTIO_MSI_NO_VECTOR, &vp_dev->admin_vq.info);
+ vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name,
+ false, VIRTIO_MSI_NO_VECTOR,
+ &vp_dev->admin_vq.info);
if (IS_ERR(vq)) {
err = PTR_ERR(vq);
goto out_del_vqs;
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index de59bb06ec3c..90df381fbbcf 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -47,6 +47,8 @@ struct virtio_pci_admin_vq {
struct virtio_pci_vq_info *info;
/* serializing admin commands execution. */
struct mutex cmd_lock;
+ /* Protects virtqueue access. */
+ spinlock_t lock;
u64 supported_cmds;
/* Name of the admin queue: avq.$vq_index. */
char name[10];
@@ -178,6 +180,7 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
#define VIRTIO_ADMIN_CMD_BITMAP 0
#endif
+void vp_modern_avq_done(struct virtqueue *vq);
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
struct virtio_admin_cmd *cmd);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 0fd344d1eaf9..608df3263df1 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -53,6 +53,23 @@ static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
return index == vp_dev->admin_vq.vq_index;
}
+void vp_modern_avq_done(struct virtqueue *vq)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+ struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+ struct virtio_admin_cmd *cmd;
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&admin_vq->lock, flags);
+ do {
+ virtqueue_disable_cb(vq);
+ while ((cmd = virtqueue_get_buf(vq, &len)))
+ complete(&cmd->completion);
+ } while (!virtqueue_enable_cb(vq));
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
+}
+
static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
u16 opcode,
struct scatterlist **sgs,
@@ -61,7 +78,8 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
struct virtio_admin_cmd *cmd)
{
struct virtqueue *vq;
- int ret, len;
+ unsigned long flags;
+ int ret;
vq = admin_vq->info->vq;
if (!vq)
@@ -72,21 +90,33 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
!((1ULL << opcode) & admin_vq->supported_cmds))
return -EOPNOTSUPP;
- ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
- if (ret < 0)
- return -EIO;
+ init_completion(&cmd->completion);
- if (unlikely(!virtqueue_kick(vq)))
+again:
+ if (virtqueue_is_broken(vq))
return -EIO;
- while (!virtqueue_get_buf(vq, &len) &&
- !virtqueue_is_broken(vq))
- cpu_relax();
+ spin_lock_irqsave(&admin_vq->lock, flags);
+ ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
+ if (ret < 0) {
+ if (ret == -ENOSPC) {
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
+ cpu_relax();
+ goto again;
+ }
+ goto unlock_err;
+ }
+ if (!virtqueue_kick(vq))
+ goto unlock_err;
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
- if (virtqueue_is_broken(vq))
- return -EIO;
+ wait_for_completion(&cmd->completion);
- return 0;
+ return cmd->ret;
+
+unlock_err:
+ spin_unlock_irqrestore(&admin_vq->lock, flags);
+ return -EIO;
}
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
@@ -209,6 +239,25 @@ static void vp_modern_avq_activate(struct virtio_device *vdev)
virtio_pci_admin_cmd_list_init(vdev);
}
+static void vp_modern_avq_cleanup(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct virtio_admin_cmd *cmd;
+ struct virtqueue *vq;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+ return;
+
+ vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq;
+ if (!vq)
+ return;
+
+ while ((cmd = virtqueue_detach_unused_buf(vq))) {
+ cmd->ret = -EIO;
+ complete(&cmd->completion);
+ }
+}
+
static void vp_transport_features(struct virtio_device *vdev, u64 features)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -403,6 +452,8 @@ static void vp_reset(struct virtio_device *vdev)
while (vp_modern_get_status(mdev))
msleep(1);
+ vp_modern_avq_cleanup(vdev);
+
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
}
@@ -785,6 +836,7 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
vp_dev->isr = mdev->isr;
vp_dev->vdev.id = mdev->id;
+ spin_lock_init(&vp_dev->admin_vq.lock);
mutex_init(&vp_dev->admin_vq.cmd_lock);
return 0;
}
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 96fea920873b..999ff5934392 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -10,6 +10,7 @@
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
+#include <linux/completion.h>
/**
* struct virtqueue - a queue to register buffers for sending or receiving.
@@ -109,6 +110,8 @@ struct virtio_admin_cmd {
__le64 group_member_id;
struct scatterlist *data_sg;
struct scatterlist *result_sg;
+ struct completion completion;
+ int ret;
};
/**
--
2.45.2
^ permalink raw reply related [flat|nested] 24+ messages in thread
* Re: [PATCH virtio v2 12/13] virtio_pci_modern: use completion instead of busy loop to wait on admin cmd result
2024-07-10 6:36 ` [PATCH virtio v2 12/13] virtio_pci_modern: use completion instead of busy loop to wait on admin cmd result Jiri Pirko
@ 2024-07-10 11:47 ` Michael S. Tsirkin
2024-07-10 13:03 ` Jiri Pirko
0 siblings, 1 reply; 24+ messages in thread
From: Michael S. Tsirkin @ 2024-07-10 11:47 UTC (permalink / raw)
To: Jiri Pirko
Cc: virtualization, jasowang, xuanzhuo, eperezma, parav, feliu,
hengqi
On Wed, Jul 10, 2024 at 08:36:00AM +0200, Jiri Pirko wrote:
> From: Jiri Pirko <jiri@nvidia.com>
>
> Currently, the code waits in a busy loop on every admin virtqueue issued
> command to get a reply. That prevents callers from issuing multiple
> commands in parallel.
>
> To overcome this limitation, introduce a virtqueue event callback for
> admin virtqueue. For every issued command, use completion mechanism
> to wait on a reply. In the event callback, trigger the completion
> is done for every incoming reply.
>
> Alongside with that, introduce a spin lock to protect the admin
> virtqueue operations.
>
> Signed-off-by: Jiri Pirko <jiri@nvidia.com>
> ---
> v1->v2:
> - rebased on top of newly added patches
> - rebased on top of changes in previous patches (vq info, vqs[])
> - removed WARN_ON_ONCE() when calling virtqueue_kick()
> - added virtqueue_is_broken check in virtqueue_exec_admin_cmd() loop
> - added vp_modern_avq_cleanup() implementation to handle surprise
> removal case
> ---
> drivers/virtio/virtio_pci_common.c | 13 ++++--
> drivers/virtio/virtio_pci_common.h | 3 ++
> drivers/virtio/virtio_pci_modern.c | 74 +++++++++++++++++++++++++-----
> include/linux/virtio.h | 3 ++
> 4 files changed, 77 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> index 267643bb1cd5..c44d8ba00c02 100644
> --- a/drivers/virtio/virtio_pci_common.c
> +++ b/drivers/virtio/virtio_pci_common.c
> @@ -395,6 +395,8 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
> if (vqi->name && vqi->callback)
> ++nvectors;
> }
> + if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH)
> + ++nvectors;
> } else {
> /* Second best: one for change, shared for all vqs. */
> nvectors = 2;
> @@ -425,9 +427,9 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
> if (!avq_num)
> return 0;
> sprintf(avq->name, "avq.%u", avq->vq_index);
> - vq = vp_find_one_vq_msix(vdev, avq->vq_index, NULL, avq->name, false,
> - true, &allocated_vectors, vector_policy,
> - &vp_dev->admin_vq.info);
> + vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
> + avq->name, false, true, &allocated_vectors,
> + vector_policy, &vp_dev->admin_vq.info);
> if (IS_ERR(vq)) {
> err = PTR_ERR(vq);
> goto error_find;
> @@ -486,8 +488,9 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
> if (!avq_num)
> return 0;
> sprintf(avq->name, "avq.%u", avq->vq_index);
> - vq = vp_setup_vq(vdev, queue_idx++, NULL, avq->name, false,
> - VIRTIO_MSI_NO_VECTOR, &vp_dev->admin_vq.info);
> + vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name,
> + false, VIRTIO_MSI_NO_VECTOR,
> + &vp_dev->admin_vq.info);
> if (IS_ERR(vq)) {
> err = PTR_ERR(vq);
> goto out_del_vqs;
> diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
> index de59bb06ec3c..90df381fbbcf 100644
> --- a/drivers/virtio/virtio_pci_common.h
> +++ b/drivers/virtio/virtio_pci_common.h
> @@ -47,6 +47,8 @@ struct virtio_pci_admin_vq {
> struct virtio_pci_vq_info *info;
> /* serializing admin commands execution. */
> struct mutex cmd_lock;
> + /* Protects virtqueue access. */
> + spinlock_t lock;
> u64 supported_cmds;
> /* Name of the admin queue: avq.$vq_index. */
> char name[10];
> @@ -178,6 +180,7 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
> #define VIRTIO_ADMIN_CMD_BITMAP 0
> #endif
>
> +void vp_modern_avq_done(struct virtqueue *vq);
> int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
> struct virtio_admin_cmd *cmd);
>
> diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
> index 0fd344d1eaf9..608df3263df1 100644
> --- a/drivers/virtio/virtio_pci_modern.c
> +++ b/drivers/virtio/virtio_pci_modern.c
> @@ -53,6 +53,23 @@ static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
> return index == vp_dev->admin_vq.vq_index;
> }
>
> +void vp_modern_avq_done(struct virtqueue *vq)
> +{
> + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> + struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
> + struct virtio_admin_cmd *cmd;
> + unsigned long flags;
> + unsigned int len;
> +
> + spin_lock_irqsave(&admin_vq->lock, flags);
> + do {
> + virtqueue_disable_cb(vq);
> + while ((cmd = virtqueue_get_buf(vq, &len)))
> + complete(&cmd->completion);
> + } while (!virtqueue_enable_cb(vq));
> + spin_unlock_irqrestore(&admin_vq->lock, flags);
> +}
> +
> static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
> u16 opcode,
> struct scatterlist **sgs,
> @@ -61,7 +78,8 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
> struct virtio_admin_cmd *cmd)
> {
> struct virtqueue *vq;
> - int ret, len;
> + unsigned long flags;
> + int ret;
>
> vq = admin_vq->info->vq;
> if (!vq)
> @@ -72,21 +90,33 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
> !((1ULL << opcode) & admin_vq->supported_cmds))
> return -EOPNOTSUPP;
>
> - ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
> - if (ret < 0)
> - return -EIO;
> + init_completion(&cmd->completion);
>
> - if (unlikely(!virtqueue_kick(vq)))
> +again:
> + if (virtqueue_is_broken(vq))
> return -EIO;
>
> - while (!virtqueue_get_buf(vq, &len) &&
> - !virtqueue_is_broken(vq))
> - cpu_relax();
> + spin_lock_irqsave(&admin_vq->lock, flags);
> + ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
> + if (ret < 0) {
> + if (ret == -ENOSPC) {
> + spin_unlock_irqrestore(&admin_vq->lock, flags);
> + cpu_relax();
> + goto again;
> + }
> + goto unlock_err;
> + }
> + if (!virtqueue_kick(vq))
> + goto unlock_err;
> + spin_unlock_irqrestore(&admin_vq->lock, flags);
>
> - if (virtqueue_is_broken(vq))
> - return -EIO;
> + wait_for_completion(&cmd->completion);
>
> - return 0;
> + return cmd->ret;
> +
> +unlock_err:
> + spin_unlock_irqrestore(&admin_vq->lock, flags);
> + return -EIO;
> }
>
> int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
> @@ -209,6 +239,25 @@ static void vp_modern_avq_activate(struct virtio_device *vdev)
> virtio_pci_admin_cmd_list_init(vdev);
> }
>
> +static void vp_modern_avq_cleanup(struct virtio_device *vdev)
> +{
> + struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> + struct virtio_admin_cmd *cmd;
> + struct virtqueue *vq;
> +
> + if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
> + return;
> +
> + vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq;
> + if (!vq)
> + return;
> +
> + while ((cmd = virtqueue_detach_unused_buf(vq))) {
> + cmd->ret = -EIO;
> + complete(&cmd->completion);
> + }
> +}
> +
I think surprise removal is still broken with this.
You need to add a callback and signal the completion.
--
MST
^ permalink raw reply [flat|nested] 24+ messages in thread
* Re: [PATCH virtio v2 12/13] virtio_pci_modern: use completion instead of busy loop to wait on admin cmd result
2024-07-10 11:47 ` Michael S. Tsirkin
@ 2024-07-10 13:03 ` Jiri Pirko
2024-07-10 13:06 ` Michael S. Tsirkin
0 siblings, 1 reply; 24+ messages in thread
From: Jiri Pirko @ 2024-07-10 13:03 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: virtualization, jasowang, xuanzhuo, eperezma, parav, feliu,
hengqi
Wed, Jul 10, 2024 at 01:47:22PM CEST, mst@redhat.com wrote:
>On Wed, Jul 10, 2024 at 08:36:00AM +0200, Jiri Pirko wrote:
>> From: Jiri Pirko <jiri@nvidia.com>
>>
>> Currently, the code waits in a busy loop on every admin virtqueue issued
>> command to get a reply. That prevents callers from issuing multiple
>> commands in parallel.
>>
>> To overcome this limitation, introduce a virtqueue event callback for
>> admin virtqueue. For every issued command, use completion mechanism
>> to wait on a reply. In the event callback, trigger the completion
>> is done for every incoming reply.
>>
>> Alongside with that, introduce a spin lock to protect the admin
>> virtqueue operations.
>>
>> Signed-off-by: Jiri Pirko <jiri@nvidia.com>
>> ---
>> v1->v2:
>> - rebased on top of newly added patches
>> - rebased on top of changes in previous patches (vq info, vqs[])
>> - removed WARN_ON_ONCE() when calling virtqueue_kick()
>> - added virtqueue_is_broken check in virtqueue_exec_admin_cmd() loop
>> - added vp_modern_avq_cleanup() implementation to handle surprise
>> removal case
>> ---
>> drivers/virtio/virtio_pci_common.c | 13 ++++--
>> drivers/virtio/virtio_pci_common.h | 3 ++
>> drivers/virtio/virtio_pci_modern.c | 74 +++++++++++++++++++++++++-----
>> include/linux/virtio.h | 3 ++
>> 4 files changed, 77 insertions(+), 16 deletions(-)
>>
>> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
>> index 267643bb1cd5..c44d8ba00c02 100644
>> --- a/drivers/virtio/virtio_pci_common.c
>> +++ b/drivers/virtio/virtio_pci_common.c
>> @@ -395,6 +395,8 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
>> if (vqi->name && vqi->callback)
>> ++nvectors;
>> }
>> + if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH)
>> + ++nvectors;
>> } else {
>> /* Second best: one for change, shared for all vqs. */
>> nvectors = 2;
>> @@ -425,9 +427,9 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
>> if (!avq_num)
>> return 0;
>> sprintf(avq->name, "avq.%u", avq->vq_index);
>> - vq = vp_find_one_vq_msix(vdev, avq->vq_index, NULL, avq->name, false,
>> - true, &allocated_vectors, vector_policy,
>> - &vp_dev->admin_vq.info);
>> + vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
>> + avq->name, false, true, &allocated_vectors,
>> + vector_policy, &vp_dev->admin_vq.info);
>> if (IS_ERR(vq)) {
>> err = PTR_ERR(vq);
>> goto error_find;
>> @@ -486,8 +488,9 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
>> if (!avq_num)
>> return 0;
>> sprintf(avq->name, "avq.%u", avq->vq_index);
>> - vq = vp_setup_vq(vdev, queue_idx++, NULL, avq->name, false,
>> - VIRTIO_MSI_NO_VECTOR, &vp_dev->admin_vq.info);
>> + vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name,
>> + false, VIRTIO_MSI_NO_VECTOR,
>> + &vp_dev->admin_vq.info);
>> if (IS_ERR(vq)) {
>> err = PTR_ERR(vq);
>> goto out_del_vqs;
>> diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
>> index de59bb06ec3c..90df381fbbcf 100644
>> --- a/drivers/virtio/virtio_pci_common.h
>> +++ b/drivers/virtio/virtio_pci_common.h
>> @@ -47,6 +47,8 @@ struct virtio_pci_admin_vq {
>> struct virtio_pci_vq_info *info;
>> /* serializing admin commands execution. */
>> struct mutex cmd_lock;
>> + /* Protects virtqueue access. */
>> + spinlock_t lock;
>> u64 supported_cmds;
>> /* Name of the admin queue: avq.$vq_index. */
>> char name[10];
>> @@ -178,6 +180,7 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
>> #define VIRTIO_ADMIN_CMD_BITMAP 0
>> #endif
>>
>> +void vp_modern_avq_done(struct virtqueue *vq);
>> int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
>> struct virtio_admin_cmd *cmd);
>>
>> diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
>> index 0fd344d1eaf9..608df3263df1 100644
>> --- a/drivers/virtio/virtio_pci_modern.c
>> +++ b/drivers/virtio/virtio_pci_modern.c
>> @@ -53,6 +53,23 @@ static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
>> return index == vp_dev->admin_vq.vq_index;
>> }
>>
>> +void vp_modern_avq_done(struct virtqueue *vq)
>> +{
>> + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
>> + struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
>> + struct virtio_admin_cmd *cmd;
>> + unsigned long flags;
>> + unsigned int len;
>> +
>> + spin_lock_irqsave(&admin_vq->lock, flags);
>> + do {
>> + virtqueue_disable_cb(vq);
>> + while ((cmd = virtqueue_get_buf(vq, &len)))
>> + complete(&cmd->completion);
>> + } while (!virtqueue_enable_cb(vq));
>> + spin_unlock_irqrestore(&admin_vq->lock, flags);
>> +}
>> +
>> static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
>> u16 opcode,
>> struct scatterlist **sgs,
>> @@ -61,7 +78,8 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
>> struct virtio_admin_cmd *cmd)
>> {
>> struct virtqueue *vq;
>> - int ret, len;
>> + unsigned long flags;
>> + int ret;
>>
>> vq = admin_vq->info->vq;
>> if (!vq)
>> @@ -72,21 +90,33 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
>> !((1ULL << opcode) & admin_vq->supported_cmds))
>> return -EOPNOTSUPP;
>>
>> - ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
>> - if (ret < 0)
>> - return -EIO;
>> + init_completion(&cmd->completion);
>>
>> - if (unlikely(!virtqueue_kick(vq)))
>> +again:
>> + if (virtqueue_is_broken(vq))
>> return -EIO;
>>
>> - while (!virtqueue_get_buf(vq, &len) &&
>> - !virtqueue_is_broken(vq))
>> - cpu_relax();
>> + spin_lock_irqsave(&admin_vq->lock, flags);
>> + ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
>> + if (ret < 0) {
>> + if (ret == -ENOSPC) {
>> + spin_unlock_irqrestore(&admin_vq->lock, flags);
>> + cpu_relax();
>> + goto again;
>> + }
>> + goto unlock_err;
>> + }
>> + if (!virtqueue_kick(vq))
>> + goto unlock_err;
>> + spin_unlock_irqrestore(&admin_vq->lock, flags);
>>
>> - if (virtqueue_is_broken(vq))
>> - return -EIO;
>> + wait_for_completion(&cmd->completion);
>>
>> - return 0;
>> + return cmd->ret;
>> +
>> +unlock_err:
>> + spin_unlock_irqrestore(&admin_vq->lock, flags);
>> + return -EIO;
>> }
>>
>> int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
>> @@ -209,6 +239,25 @@ static void vp_modern_avq_activate(struct virtio_device *vdev)
>> virtio_pci_admin_cmd_list_init(vdev);
>> }
>>
>> +static void vp_modern_avq_cleanup(struct virtio_device *vdev)
>> +{
>> + struct virtio_pci_device *vp_dev = to_vp_device(vdev);
>> + struct virtio_admin_cmd *cmd;
>> + struct virtqueue *vq;
>> +
>> + if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
>> + return;
>> +
>> + vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq;
>> + if (!vq)
>> + return;
>> +
>> + while ((cmd = virtqueue_detach_unused_buf(vq))) {
>> + cmd->ret = -EIO;
>> + complete(&cmd->completion);
>> + }
>> +}
>> +
>
>
>I think surprise removal is still broken with this.
Why do you think so? I will drain the remaining buffers from the queue
and complete them all. What's missing?
>You need to add a callback and signal the completion.
Callback to what?
>
>--
>MST
>
^ permalink raw reply [flat|nested] 24+ messages in thread
* Re: [PATCH virtio v2 12/13] virtio_pci_modern: use completion instead of busy loop to wait on admin cmd result
2024-07-10 13:03 ` Jiri Pirko
@ 2024-07-10 13:06 ` Michael S. Tsirkin
2024-07-11 8:10 ` Jiri Pirko
0 siblings, 1 reply; 24+ messages in thread
From: Michael S. Tsirkin @ 2024-07-10 13:06 UTC (permalink / raw)
To: Jiri Pirko
Cc: virtualization, jasowang, xuanzhuo, eperezma, parav, feliu,
hengqi
On Wed, Jul 10, 2024 at 03:03:33PM +0200, Jiri Pirko wrote:
> Wed, Jul 10, 2024 at 01:47:22PM CEST, mst@redhat.com wrote:
> >On Wed, Jul 10, 2024 at 08:36:00AM +0200, Jiri Pirko wrote:
> >> From: Jiri Pirko <jiri@nvidia.com>
> >>
> >> Currently, the code waits in a busy loop on every admin virtqueue issued
> >> command to get a reply. That prevents callers from issuing multiple
> >> commands in parallel.
> >>
> >> To overcome this limitation, introduce a virtqueue event callback for
> >> admin virtqueue. For every issued command, use completion mechanism
> >> to wait on a reply. In the event callback, trigger the completion
> >> is done for every incoming reply.
> >>
> >> Alongside with that, introduce a spin lock to protect the admin
> >> virtqueue operations.
> >>
> >> Signed-off-by: Jiri Pirko <jiri@nvidia.com>
> >> ---
> >> v1->v2:
> >> - rebased on top of newly added patches
> >> - rebased on top of changes in previous patches (vq info, vqs[])
> >> - removed WARN_ON_ONCE() when calling virtqueue_kick()
> >> - added virtqueue_is_broken check in virtqueue_exec_admin_cmd() loop
> >> - added vp_modern_avq_cleanup() implementation to handle surprise
> >> removal case
> >> ---
> >> drivers/virtio/virtio_pci_common.c | 13 ++++--
> >> drivers/virtio/virtio_pci_common.h | 3 ++
> >> drivers/virtio/virtio_pci_modern.c | 74 +++++++++++++++++++++++++-----
> >> include/linux/virtio.h | 3 ++
> >> 4 files changed, 77 insertions(+), 16 deletions(-)
> >>
> >> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> >> index 267643bb1cd5..c44d8ba00c02 100644
> >> --- a/drivers/virtio/virtio_pci_common.c
> >> +++ b/drivers/virtio/virtio_pci_common.c
> >> @@ -395,6 +395,8 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
> >> if (vqi->name && vqi->callback)
> >> ++nvectors;
> >> }
> >> + if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH)
> >> + ++nvectors;
> >> } else {
> >> /* Second best: one for change, shared for all vqs. */
> >> nvectors = 2;
> >> @@ -425,9 +427,9 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
> >> if (!avq_num)
> >> return 0;
> >> sprintf(avq->name, "avq.%u", avq->vq_index);
> >> - vq = vp_find_one_vq_msix(vdev, avq->vq_index, NULL, avq->name, false,
> >> - true, &allocated_vectors, vector_policy,
> >> - &vp_dev->admin_vq.info);
> >> + vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
> >> + avq->name, false, true, &allocated_vectors,
> >> + vector_policy, &vp_dev->admin_vq.info);
> >> if (IS_ERR(vq)) {
> >> err = PTR_ERR(vq);
> >> goto error_find;
> >> @@ -486,8 +488,9 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
> >> if (!avq_num)
> >> return 0;
> >> sprintf(avq->name, "avq.%u", avq->vq_index);
> >> - vq = vp_setup_vq(vdev, queue_idx++, NULL, avq->name, false,
> >> - VIRTIO_MSI_NO_VECTOR, &vp_dev->admin_vq.info);
> >> + vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name,
> >> + false, VIRTIO_MSI_NO_VECTOR,
> >> + &vp_dev->admin_vq.info);
> >> if (IS_ERR(vq)) {
> >> err = PTR_ERR(vq);
> >> goto out_del_vqs;
> >> diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
> >> index de59bb06ec3c..90df381fbbcf 100644
> >> --- a/drivers/virtio/virtio_pci_common.h
> >> +++ b/drivers/virtio/virtio_pci_common.h
> >> @@ -47,6 +47,8 @@ struct virtio_pci_admin_vq {
> >> struct virtio_pci_vq_info *info;
> >> /* serializing admin commands execution. */
> >> struct mutex cmd_lock;
> >> + /* Protects virtqueue access. */
> >> + spinlock_t lock;
> >> u64 supported_cmds;
> >> /* Name of the admin queue: avq.$vq_index. */
> >> char name[10];
> >> @@ -178,6 +180,7 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
> >> #define VIRTIO_ADMIN_CMD_BITMAP 0
> >> #endif
> >>
> >> +void vp_modern_avq_done(struct virtqueue *vq);
> >> int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
> >> struct virtio_admin_cmd *cmd);
> >>
> >> diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
> >> index 0fd344d1eaf9..608df3263df1 100644
> >> --- a/drivers/virtio/virtio_pci_modern.c
> >> +++ b/drivers/virtio/virtio_pci_modern.c
> >> @@ -53,6 +53,23 @@ static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
> >> return index == vp_dev->admin_vq.vq_index;
> >> }
> >>
> >> +void vp_modern_avq_done(struct virtqueue *vq)
> >> +{
> >> + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> >> + struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
> >> + struct virtio_admin_cmd *cmd;
> >> + unsigned long flags;
> >> + unsigned int len;
> >> +
> >> + spin_lock_irqsave(&admin_vq->lock, flags);
> >> + do {
> >> + virtqueue_disable_cb(vq);
> >> + while ((cmd = virtqueue_get_buf(vq, &len)))
> >> + complete(&cmd->completion);
> >> + } while (!virtqueue_enable_cb(vq));
> >> + spin_unlock_irqrestore(&admin_vq->lock, flags);
> >> +}
> >> +
> >> static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
> >> u16 opcode,
> >> struct scatterlist **sgs,
> >> @@ -61,7 +78,8 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
> >> struct virtio_admin_cmd *cmd)
> >> {
> >> struct virtqueue *vq;
> >> - int ret, len;
> >> + unsigned long flags;
> >> + int ret;
> >>
> >> vq = admin_vq->info->vq;
> >> if (!vq)
> >> @@ -72,21 +90,33 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
> >> !((1ULL << opcode) & admin_vq->supported_cmds))
> >> return -EOPNOTSUPP;
> >>
> >> - ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
> >> - if (ret < 0)
> >> - return -EIO;
> >> + init_completion(&cmd->completion);
> >>
> >> - if (unlikely(!virtqueue_kick(vq)))
> >> +again:
> >> + if (virtqueue_is_broken(vq))
> >> return -EIO;
> >>
> >> - while (!virtqueue_get_buf(vq, &len) &&
> >> - !virtqueue_is_broken(vq))
> >> - cpu_relax();
> >> + spin_lock_irqsave(&admin_vq->lock, flags);
> >> + ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
> >> + if (ret < 0) {
> >> + if (ret == -ENOSPC) {
> >> + spin_unlock_irqrestore(&admin_vq->lock, flags);
> >> + cpu_relax();
> >> + goto again;
> >> + }
> >> + goto unlock_err;
> >> + }
> >> + if (!virtqueue_kick(vq))
> >> + goto unlock_err;
> >> + spin_unlock_irqrestore(&admin_vq->lock, flags);
> >>
> >> - if (virtqueue_is_broken(vq))
> >> - return -EIO;
> >> + wait_for_completion(&cmd->completion);
> >>
> >> - return 0;
> >> + return cmd->ret;
> >> +
> >> +unlock_err:
> >> + spin_unlock_irqrestore(&admin_vq->lock, flags);
> >> + return -EIO;
> >> }
> >>
> >> int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
> >> @@ -209,6 +239,25 @@ static void vp_modern_avq_activate(struct virtio_device *vdev)
> >> virtio_pci_admin_cmd_list_init(vdev);
> >> }
> >>
> >> +static void vp_modern_avq_cleanup(struct virtio_device *vdev)
> >> +{
> >> + struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> >> + struct virtio_admin_cmd *cmd;
> >> + struct virtqueue *vq;
> >> +
> >> + if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
> >> + return;
> >> +
> >> + vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq;
> >> + if (!vq)
> >> + return;
> >> +
> >> + while ((cmd = virtqueue_detach_unused_buf(vq))) {
> >> + cmd->ret = -EIO;
> >> + complete(&cmd->completion);
> >> + }
> >> +}
> >> +
> >
> >
> >I think surprise removal is still broken with this.
>
> Why do you think so? I will drain the remaining buffers from the queue
> and complete them all. What's missing?
You have this:
> >> + wait_for_completion(&cmd->completion);
if surprise removal triggers after you submitted command
but before it was used, no callback triggers and so no
completion either.
>
> >You need to add a callback and signal the completion.
>
> Callback to what?
for example, remove callback can signal completion.
>
>
>
> >
> >--
> >MST
> >
^ permalink raw reply [flat|nested] 24+ messages in thread
* Re: [PATCH virtio v2 12/13] virtio_pci_modern: use completion instead of busy loop to wait on admin cmd result
2024-07-10 13:06 ` Michael S. Tsirkin
@ 2024-07-11 8:10 ` Jiri Pirko
2024-07-11 8:19 ` Michael S. Tsirkin
0 siblings, 1 reply; 24+ messages in thread
From: Jiri Pirko @ 2024-07-11 8:10 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: virtualization, jasowang, xuanzhuo, eperezma, parav, feliu,
hengqi
Wed, Jul 10, 2024 at 03:06:57PM CEST, mst@redhat.com wrote:
>On Wed, Jul 10, 2024 at 03:03:33PM +0200, Jiri Pirko wrote:
>> Wed, Jul 10, 2024 at 01:47:22PM CEST, mst@redhat.com wrote:
>> >On Wed, Jul 10, 2024 at 08:36:00AM +0200, Jiri Pirko wrote:
>> >> From: Jiri Pirko <jiri@nvidia.com>
>> >>
>> >> Currently, the code waits in a busy loop on every admin virtqueue issued
>> >> command to get a reply. That prevents callers from issuing multiple
>> >> commands in parallel.
>> >>
>> >> To overcome this limitation, introduce a virtqueue event callback for
>> >> admin virtqueue. For every issued command, use completion mechanism
>> >> to wait on a reply. In the event callback, trigger the completion
>> >> is done for every incoming reply.
>> >>
>> >> Alongside with that, introduce a spin lock to protect the admin
>> >> virtqueue operations.
>> >>
>> >> Signed-off-by: Jiri Pirko <jiri@nvidia.com>
>> >> ---
>> >> v1->v2:
>> >> - rebased on top of newly added patches
>> >> - rebased on top of changes in previous patches (vq info, vqs[])
>> >> - removed WARN_ON_ONCE() when calling virtqueue_kick()
>> >> - added virtqueue_is_broken check in virtqueue_exec_admin_cmd() loop
>> >> - added vp_modern_avq_cleanup() implementation to handle surprise
>> >> removal case
>> >> ---
>> >> drivers/virtio/virtio_pci_common.c | 13 ++++--
>> >> drivers/virtio/virtio_pci_common.h | 3 ++
>> >> drivers/virtio/virtio_pci_modern.c | 74 +++++++++++++++++++++++++-----
>> >> include/linux/virtio.h | 3 ++
>> >> 4 files changed, 77 insertions(+), 16 deletions(-)
>> >>
>> >> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
>> >> index 267643bb1cd5..c44d8ba00c02 100644
>> >> --- a/drivers/virtio/virtio_pci_common.c
>> >> +++ b/drivers/virtio/virtio_pci_common.c
>> >> @@ -395,6 +395,8 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
>> >> if (vqi->name && vqi->callback)
>> >> ++nvectors;
>> >> }
>> >> + if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH)
>> >> + ++nvectors;
>> >> } else {
>> >> /* Second best: one for change, shared for all vqs. */
>> >> nvectors = 2;
>> >> @@ -425,9 +427,9 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
>> >> if (!avq_num)
>> >> return 0;
>> >> sprintf(avq->name, "avq.%u", avq->vq_index);
>> >> - vq = vp_find_one_vq_msix(vdev, avq->vq_index, NULL, avq->name, false,
>> >> - true, &allocated_vectors, vector_policy,
>> >> - &vp_dev->admin_vq.info);
>> >> + vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
>> >> + avq->name, false, true, &allocated_vectors,
>> >> + vector_policy, &vp_dev->admin_vq.info);
>> >> if (IS_ERR(vq)) {
>> >> err = PTR_ERR(vq);
>> >> goto error_find;
>> >> @@ -486,8 +488,9 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
>> >> if (!avq_num)
>> >> return 0;
>> >> sprintf(avq->name, "avq.%u", avq->vq_index);
>> >> - vq = vp_setup_vq(vdev, queue_idx++, NULL, avq->name, false,
>> >> - VIRTIO_MSI_NO_VECTOR, &vp_dev->admin_vq.info);
>> >> + vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name,
>> >> + false, VIRTIO_MSI_NO_VECTOR,
>> >> + &vp_dev->admin_vq.info);
>> >> if (IS_ERR(vq)) {
>> >> err = PTR_ERR(vq);
>> >> goto out_del_vqs;
>> >> diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
>> >> index de59bb06ec3c..90df381fbbcf 100644
>> >> --- a/drivers/virtio/virtio_pci_common.h
>> >> +++ b/drivers/virtio/virtio_pci_common.h
>> >> @@ -47,6 +47,8 @@ struct virtio_pci_admin_vq {
>> >> struct virtio_pci_vq_info *info;
>> >> /* serializing admin commands execution. */
>> >> struct mutex cmd_lock;
>> >> + /* Protects virtqueue access. */
>> >> + spinlock_t lock;
>> >> u64 supported_cmds;
>> >> /* Name of the admin queue: avq.$vq_index. */
>> >> char name[10];
>> >> @@ -178,6 +180,7 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
>> >> #define VIRTIO_ADMIN_CMD_BITMAP 0
>> >> #endif
>> >>
>> >> +void vp_modern_avq_done(struct virtqueue *vq);
>> >> int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
>> >> struct virtio_admin_cmd *cmd);
>> >>
>> >> diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
>> >> index 0fd344d1eaf9..608df3263df1 100644
>> >> --- a/drivers/virtio/virtio_pci_modern.c
>> >> +++ b/drivers/virtio/virtio_pci_modern.c
>> >> @@ -53,6 +53,23 @@ static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
>> >> return index == vp_dev->admin_vq.vq_index;
>> >> }
>> >>
>> >> +void vp_modern_avq_done(struct virtqueue *vq)
>> >> +{
>> >> + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
>> >> + struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
>> >> + struct virtio_admin_cmd *cmd;
>> >> + unsigned long flags;
>> >> + unsigned int len;
>> >> +
>> >> + spin_lock_irqsave(&admin_vq->lock, flags);
>> >> + do {
>> >> + virtqueue_disable_cb(vq);
>> >> + while ((cmd = virtqueue_get_buf(vq, &len)))
>> >> + complete(&cmd->completion);
>> >> + } while (!virtqueue_enable_cb(vq));
>> >> + spin_unlock_irqrestore(&admin_vq->lock, flags);
>> >> +}
>> >> +
>> >> static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
>> >> u16 opcode,
>> >> struct scatterlist **sgs,
>> >> @@ -61,7 +78,8 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
>> >> struct virtio_admin_cmd *cmd)
>> >> {
>> >> struct virtqueue *vq;
>> >> - int ret, len;
>> >> + unsigned long flags;
>> >> + int ret;
>> >>
>> >> vq = admin_vq->info->vq;
>> >> if (!vq)
>> >> @@ -72,21 +90,33 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
>> >> !((1ULL << opcode) & admin_vq->supported_cmds))
>> >> return -EOPNOTSUPP;
>> >>
>> >> - ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
>> >> - if (ret < 0)
>> >> - return -EIO;
>> >> + init_completion(&cmd->completion);
>> >>
>> >> - if (unlikely(!virtqueue_kick(vq)))
>> >> +again:
>> >> + if (virtqueue_is_broken(vq))
>> >> return -EIO;
>> >>
>> >> - while (!virtqueue_get_buf(vq, &len) &&
>> >> - !virtqueue_is_broken(vq))
>> >> - cpu_relax();
>> >> + spin_lock_irqsave(&admin_vq->lock, flags);
>> >> + ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
>> >> + if (ret < 0) {
>> >> + if (ret == -ENOSPC) {
>> >> + spin_unlock_irqrestore(&admin_vq->lock, flags);
>> >> + cpu_relax();
>> >> + goto again;
>> >> + }
>> >> + goto unlock_err;
>> >> + }
>> >> + if (!virtqueue_kick(vq))
>> >> + goto unlock_err;
>> >> + spin_unlock_irqrestore(&admin_vq->lock, flags);
>> >>
>> >> - if (virtqueue_is_broken(vq))
>> >> - return -EIO;
>> >> + wait_for_completion(&cmd->completion);
>> >>
>> >> - return 0;
>> >> + return cmd->ret;
>> >> +
>> >> +unlock_err:
>> >> + spin_unlock_irqrestore(&admin_vq->lock, flags);
>> >> + return -EIO;
>> >> }
>> >>
>> >> int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
>> >> @@ -209,6 +239,25 @@ static void vp_modern_avq_activate(struct virtio_device *vdev)
>> >> virtio_pci_admin_cmd_list_init(vdev);
>> >> }
>> >>
>> >> +static void vp_modern_avq_cleanup(struct virtio_device *vdev)
>> >> +{
>> >> + struct virtio_pci_device *vp_dev = to_vp_device(vdev);
>> >> + struct virtio_admin_cmd *cmd;
>> >> + struct virtqueue *vq;
>> >> +
>> >> + if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
>> >> + return;
>> >> +
>> >> + vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq;
>> >> + if (!vq)
>> >> + return;
>> >> +
>> >> + while ((cmd = virtqueue_detach_unused_buf(vq))) {
>> >> + cmd->ret = -EIO;
>> >> + complete(&cmd->completion);
>> >> + }
>> >> +}
>> >> +
>> >
>> >
>> >I think surprise removal is still broken with this.
>>
>> Why do you think so? I will drain the remaining buffers from the queue
>> and complete them all. What's missing?
>
>You have this:
>
>> >> + wait_for_completion(&cmd->completion);
>
>if surprise removal triggers after you submitted command
>but before it was used, no callback triggers and so no
>completion either.
Well. In that case
virtio_pci_remove()
-> unregister_virtio_device()
-> device_unregister()
-> virtnet_remove()
-> remove_vq_common()
-> virtio_reset_device()
-> vp_reset()
-> vp_modern_avq_cleanup()
which goes over these cmds and does complete
them all. See vp_modern_avq_cleanup() above.
What am I missing?
>
>
>>
>> >You need to add a callback and signal the completion.
>>
>> Callback to what?
>
>for example, remove callback can signal completion.
>
>
>>
>>
>>
>> >
>> >--
>> >MST
>> >
>
^ permalink raw reply [flat|nested] 24+ messages in thread
* Re: [PATCH virtio v2 12/13] virtio_pci_modern: use completion instead of busy loop to wait on admin cmd result
2024-07-11 8:10 ` Jiri Pirko
@ 2024-07-11 8:19 ` Michael S. Tsirkin
2024-07-11 12:46 ` Jiri Pirko
0 siblings, 1 reply; 24+ messages in thread
From: Michael S. Tsirkin @ 2024-07-11 8:19 UTC (permalink / raw)
To: Jiri Pirko
Cc: virtualization, jasowang, xuanzhuo, eperezma, parav, feliu,
hengqi
On Thu, Jul 11, 2024 at 10:10:57AM +0200, Jiri Pirko wrote:
> Wed, Jul 10, 2024 at 03:06:57PM CEST, mst@redhat.com wrote:
> >On Wed, Jul 10, 2024 at 03:03:33PM +0200, Jiri Pirko wrote:
> >> Wed, Jul 10, 2024 at 01:47:22PM CEST, mst@redhat.com wrote:
> >> >On Wed, Jul 10, 2024 at 08:36:00AM +0200, Jiri Pirko wrote:
> >> >> From: Jiri Pirko <jiri@nvidia.com>
> >> >>
> >> >> Currently, the code waits in a busy loop on every admin virtqueue issued
> >> >> command to get a reply. That prevents callers from issuing multiple
> >> >> commands in parallel.
> >> >>
> >> >> To overcome this limitation, introduce a virtqueue event callback for
> >> >> admin virtqueue. For every issued command, use completion mechanism
> >> >> to wait on a reply. In the event callback, trigger the completion
> >> >> is done for every incoming reply.
> >> >>
> >> >> Alongside with that, introduce a spin lock to protect the admin
> >> >> virtqueue operations.
> >> >>
> >> >> Signed-off-by: Jiri Pirko <jiri@nvidia.com>
> >> >> ---
> >> >> v1->v2:
> >> >> - rebased on top of newly added patches
> >> >> - rebased on top of changes in previous patches (vq info, vqs[])
> >> >> - removed WARN_ON_ONCE() when calling virtqueue_kick()
> >> >> - added virtqueue_is_broken check in virtqueue_exec_admin_cmd() loop
> >> >> - added vp_modern_avq_cleanup() implementation to handle surprise
> >> >> removal case
> >> >> ---
> >> >> drivers/virtio/virtio_pci_common.c | 13 ++++--
> >> >> drivers/virtio/virtio_pci_common.h | 3 ++
> >> >> drivers/virtio/virtio_pci_modern.c | 74 +++++++++++++++++++++++++-----
> >> >> include/linux/virtio.h | 3 ++
> >> >> 4 files changed, 77 insertions(+), 16 deletions(-)
> >> >>
> >> >> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> >> >> index 267643bb1cd5..c44d8ba00c02 100644
> >> >> --- a/drivers/virtio/virtio_pci_common.c
> >> >> +++ b/drivers/virtio/virtio_pci_common.c
> >> >> @@ -395,6 +395,8 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
> >> >> if (vqi->name && vqi->callback)
> >> >> ++nvectors;
> >> >> }
> >> >> + if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH)
> >> >> + ++nvectors;
> >> >> } else {
> >> >> /* Second best: one for change, shared for all vqs. */
> >> >> nvectors = 2;
> >> >> @@ -425,9 +427,9 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
> >> >> if (!avq_num)
> >> >> return 0;
> >> >> sprintf(avq->name, "avq.%u", avq->vq_index);
> >> >> - vq = vp_find_one_vq_msix(vdev, avq->vq_index, NULL, avq->name, false,
> >> >> - true, &allocated_vectors, vector_policy,
> >> >> - &vp_dev->admin_vq.info);
> >> >> + vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
> >> >> + avq->name, false, true, &allocated_vectors,
> >> >> + vector_policy, &vp_dev->admin_vq.info);
> >> >> if (IS_ERR(vq)) {
> >> >> err = PTR_ERR(vq);
> >> >> goto error_find;
> >> >> @@ -486,8 +488,9 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
> >> >> if (!avq_num)
> >> >> return 0;
> >> >> sprintf(avq->name, "avq.%u", avq->vq_index);
> >> >> - vq = vp_setup_vq(vdev, queue_idx++, NULL, avq->name, false,
> >> >> - VIRTIO_MSI_NO_VECTOR, &vp_dev->admin_vq.info);
> >> >> + vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name,
> >> >> + false, VIRTIO_MSI_NO_VECTOR,
> >> >> + &vp_dev->admin_vq.info);
> >> >> if (IS_ERR(vq)) {
> >> >> err = PTR_ERR(vq);
> >> >> goto out_del_vqs;
> >> >> diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
> >> >> index de59bb06ec3c..90df381fbbcf 100644
> >> >> --- a/drivers/virtio/virtio_pci_common.h
> >> >> +++ b/drivers/virtio/virtio_pci_common.h
> >> >> @@ -47,6 +47,8 @@ struct virtio_pci_admin_vq {
> >> >> struct virtio_pci_vq_info *info;
> >> >> /* serializing admin commands execution. */
> >> >> struct mutex cmd_lock;
> >> >> + /* Protects virtqueue access. */
> >> >> + spinlock_t lock;
> >> >> u64 supported_cmds;
> >> >> /* Name of the admin queue: avq.$vq_index. */
> >> >> char name[10];
> >> >> @@ -178,6 +180,7 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
> >> >> #define VIRTIO_ADMIN_CMD_BITMAP 0
> >> >> #endif
> >> >>
> >> >> +void vp_modern_avq_done(struct virtqueue *vq);
> >> >> int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
> >> >> struct virtio_admin_cmd *cmd);
> >> >>
> >> >> diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
> >> >> index 0fd344d1eaf9..608df3263df1 100644
> >> >> --- a/drivers/virtio/virtio_pci_modern.c
> >> >> +++ b/drivers/virtio/virtio_pci_modern.c
> >> >> @@ -53,6 +53,23 @@ static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
> >> >> return index == vp_dev->admin_vq.vq_index;
> >> >> }
> >> >>
> >> >> +void vp_modern_avq_done(struct virtqueue *vq)
> >> >> +{
> >> >> + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> >> >> + struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
> >> >> + struct virtio_admin_cmd *cmd;
> >> >> + unsigned long flags;
> >> >> + unsigned int len;
> >> >> +
> >> >> + spin_lock_irqsave(&admin_vq->lock, flags);
> >> >> + do {
> >> >> + virtqueue_disable_cb(vq);
> >> >> + while ((cmd = virtqueue_get_buf(vq, &len)))
> >> >> + complete(&cmd->completion);
> >> >> + } while (!virtqueue_enable_cb(vq));
> >> >> + spin_unlock_irqrestore(&admin_vq->lock, flags);
> >> >> +}
> >> >> +
> >> >> static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
> >> >> u16 opcode,
> >> >> struct scatterlist **sgs,
> >> >> @@ -61,7 +78,8 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
> >> >> struct virtio_admin_cmd *cmd)
> >> >> {
> >> >> struct virtqueue *vq;
> >> >> - int ret, len;
> >> >> + unsigned long flags;
> >> >> + int ret;
> >> >>
> >> >> vq = admin_vq->info->vq;
> >> >> if (!vq)
> >> >> @@ -72,21 +90,33 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
> >> >> !((1ULL << opcode) & admin_vq->supported_cmds))
> >> >> return -EOPNOTSUPP;
> >> >>
> >> >> - ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
> >> >> - if (ret < 0)
> >> >> - return -EIO;
> >> >> + init_completion(&cmd->completion);
> >> >>
> >> >> - if (unlikely(!virtqueue_kick(vq)))
> >> >> +again:
> >> >> + if (virtqueue_is_broken(vq))
> >> >> return -EIO;
> >> >>
> >> >> - while (!virtqueue_get_buf(vq, &len) &&
> >> >> - !virtqueue_is_broken(vq))
> >> >> - cpu_relax();
> >> >> + spin_lock_irqsave(&admin_vq->lock, flags);
> >> >> + ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
> >> >> + if (ret < 0) {
> >> >> + if (ret == -ENOSPC) {
> >> >> + spin_unlock_irqrestore(&admin_vq->lock, flags);
> >> >> + cpu_relax();
> >> >> + goto again;
> >> >> + }
> >> >> + goto unlock_err;
> >> >> + }
> >> >> + if (!virtqueue_kick(vq))
> >> >> + goto unlock_err;
> >> >> + spin_unlock_irqrestore(&admin_vq->lock, flags);
> >> >>
> >> >> - if (virtqueue_is_broken(vq))
> >> >> - return -EIO;
> >> >> + wait_for_completion(&cmd->completion);
> >> >>
> >> >> - return 0;
> >> >> + return cmd->ret;
> >> >> +
> >> >> +unlock_err:
> >> >> + spin_unlock_irqrestore(&admin_vq->lock, flags);
> >> >> + return -EIO;
> >> >> }
> >> >>
> >> >> int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
> >> >> @@ -209,6 +239,25 @@ static void vp_modern_avq_activate(struct virtio_device *vdev)
> >> >> virtio_pci_admin_cmd_list_init(vdev);
> >> >> }
> >> >>
> >> >> +static void vp_modern_avq_cleanup(struct virtio_device *vdev)
> >> >> +{
> >> >> + struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> >> >> + struct virtio_admin_cmd *cmd;
> >> >> + struct virtqueue *vq;
> >> >> +
> >> >> + if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
> >> >> + return;
> >> >> +
> >> >> + vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq;
> >> >> + if (!vq)
> >> >> + return;
> >> >> +
> >> >> + while ((cmd = virtqueue_detach_unused_buf(vq))) {
> >> >> + cmd->ret = -EIO;
> >> >> + complete(&cmd->completion);
> >> >> + }
> >> >> +}
> >> >> +
> >> >
> >> >
> >> >I think surprise removal is still broken with this.
> >>
> >> Why do you think so? I will drain the remaining buffers from the queue
> >> and complete them all. What's missing?
> >
> >You have this:
> >
> >> >> + wait_for_completion(&cmd->completion);
> >
> >if surprise removal triggers after you submitted command
> >but before it was used, no callback triggers and so no
> >completion either.
>
> Well. In that case
>
> virtio_pci_remove()
> -> unregister_virtio_device()
> -> device_unregister()
> -> virtnet_remove()
> -> remove_vq_common()
> -> virtio_reset_device()
> -> vp_reset()
> -> vp_modern_avq_cleanup()
> which goes over these cmds and does complete
> them all. See vp_modern_avq_cleanup() above.
>
> What am I missing?
Oh, you are right. Won't work for cvq but that's
a separate issue.
>
> >
> >
> >>
> >> >You need to add a callback and signal the completion.
> >>
> >> Callback to what?
> >
> >for example, remove callback can signal completion.
> >
> >
> >>
> >>
> >>
> >> >
> >> >--
> >> >MST
> >> >
> >
^ permalink raw reply [flat|nested] 24+ messages in thread
* Re: [PATCH virtio v2 12/13] virtio_pci_modern: use completion instead of busy loop to wait on admin cmd result
2024-07-11 8:19 ` Michael S. Tsirkin
@ 2024-07-11 12:46 ` Jiri Pirko
0 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-07-11 12:46 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: virtualization, jasowang, xuanzhuo, eperezma, parav, feliu,
hengqi
Thu, Jul 11, 2024 at 10:19:42AM CEST, mst@redhat.com wrote:
>On Thu, Jul 11, 2024 at 10:10:57AM +0200, Jiri Pirko wrote:
>> Wed, Jul 10, 2024 at 03:06:57PM CEST, mst@redhat.com wrote:
>> >On Wed, Jul 10, 2024 at 03:03:33PM +0200, Jiri Pirko wrote:
>> >> Wed, Jul 10, 2024 at 01:47:22PM CEST, mst@redhat.com wrote:
>> >> >On Wed, Jul 10, 2024 at 08:36:00AM +0200, Jiri Pirko wrote:
>> >> >> From: Jiri Pirko <jiri@nvidia.com>
>> >> >>
>> >> >> Currently, the code waits in a busy loop on every admin virtqueue issued
>> >> >> command to get a reply. That prevents callers from issuing multiple
>> >> >> commands in parallel.
>> >> >>
>> >> >> To overcome this limitation, introduce a virtqueue event callback for
>> >> >> admin virtqueue. For every issued command, use completion mechanism
>> >> >> to wait on a reply. In the event callback, trigger the completion
>> >> >> is done for every incoming reply.
>> >> >>
>> >> >> Alongside with that, introduce a spin lock to protect the admin
>> >> >> virtqueue operations.
>> >> >>
>> >> >> Signed-off-by: Jiri Pirko <jiri@nvidia.com>
>> >> >> ---
>> >> >> v1->v2:
>> >> >> - rebased on top of newly added patches
>> >> >> - rebased on top of changes in previous patches (vq info, vqs[])
>> >> >> - removed WARN_ON_ONCE() when calling virtqueue_kick()
>> >> >> - added virtqueue_is_broken check in virtqueue_exec_admin_cmd() loop
>> >> >> - added vp_modern_avq_cleanup() implementation to handle surprise
>> >> >> removal case
>> >> >> ---
>> >> >> drivers/virtio/virtio_pci_common.c | 13 ++++--
>> >> >> drivers/virtio/virtio_pci_common.h | 3 ++
>> >> >> drivers/virtio/virtio_pci_modern.c | 74 +++++++++++++++++++++++++-----
>> >> >> include/linux/virtio.h | 3 ++
>> >> >> 4 files changed, 77 insertions(+), 16 deletions(-)
>> >> >>
>> >> >> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
>> >> >> index 267643bb1cd5..c44d8ba00c02 100644
>> >> >> --- a/drivers/virtio/virtio_pci_common.c
>> >> >> +++ b/drivers/virtio/virtio_pci_common.c
>> >> >> @@ -395,6 +395,8 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
>> >> >> if (vqi->name && vqi->callback)
>> >> >> ++nvectors;
>> >> >> }
>> >> >> + if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH)
>> >> >> + ++nvectors;
>> >> >> } else {
>> >> >> /* Second best: one for change, shared for all vqs. */
>> >> >> nvectors = 2;
>> >> >> @@ -425,9 +427,9 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
>> >> >> if (!avq_num)
>> >> >> return 0;
>> >> >> sprintf(avq->name, "avq.%u", avq->vq_index);
>> >> >> - vq = vp_find_one_vq_msix(vdev, avq->vq_index, NULL, avq->name, false,
>> >> >> - true, &allocated_vectors, vector_policy,
>> >> >> - &vp_dev->admin_vq.info);
>> >> >> + vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
>> >> >> + avq->name, false, true, &allocated_vectors,
>> >> >> + vector_policy, &vp_dev->admin_vq.info);
>> >> >> if (IS_ERR(vq)) {
>> >> >> err = PTR_ERR(vq);
>> >> >> goto error_find;
>> >> >> @@ -486,8 +488,9 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
>> >> >> if (!avq_num)
>> >> >> return 0;
>> >> >> sprintf(avq->name, "avq.%u", avq->vq_index);
>> >> >> - vq = vp_setup_vq(vdev, queue_idx++, NULL, avq->name, false,
>> >> >> - VIRTIO_MSI_NO_VECTOR, &vp_dev->admin_vq.info);
>> >> >> + vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name,
>> >> >> + false, VIRTIO_MSI_NO_VECTOR,
>> >> >> + &vp_dev->admin_vq.info);
>> >> >> if (IS_ERR(vq)) {
>> >> >> err = PTR_ERR(vq);
>> >> >> goto out_del_vqs;
>> >> >> diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
>> >> >> index de59bb06ec3c..90df381fbbcf 100644
>> >> >> --- a/drivers/virtio/virtio_pci_common.h
>> >> >> +++ b/drivers/virtio/virtio_pci_common.h
>> >> >> @@ -47,6 +47,8 @@ struct virtio_pci_admin_vq {
>> >> >> struct virtio_pci_vq_info *info;
>> >> >> /* serializing admin commands execution. */
>> >> >> struct mutex cmd_lock;
>> >> >> + /* Protects virtqueue access. */
>> >> >> + spinlock_t lock;
>> >> >> u64 supported_cmds;
>> >> >> /* Name of the admin queue: avq.$vq_index. */
>> >> >> char name[10];
>> >> >> @@ -178,6 +180,7 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
>> >> >> #define VIRTIO_ADMIN_CMD_BITMAP 0
>> >> >> #endif
>> >> >>
>> >> >> +void vp_modern_avq_done(struct virtqueue *vq);
>> >> >> int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
>> >> >> struct virtio_admin_cmd *cmd);
>> >> >>
>> >> >> diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
>> >> >> index 0fd344d1eaf9..608df3263df1 100644
>> >> >> --- a/drivers/virtio/virtio_pci_modern.c
>> >> >> +++ b/drivers/virtio/virtio_pci_modern.c
>> >> >> @@ -53,6 +53,23 @@ static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
>> >> >> return index == vp_dev->admin_vq.vq_index;
>> >> >> }
>> >> >>
>> >> >> +void vp_modern_avq_done(struct virtqueue *vq)
>> >> >> +{
>> >> >> + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
>> >> >> + struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
>> >> >> + struct virtio_admin_cmd *cmd;
>> >> >> + unsigned long flags;
>> >> >> + unsigned int len;
>> >> >> +
>> >> >> + spin_lock_irqsave(&admin_vq->lock, flags);
>> >> >> + do {
>> >> >> + virtqueue_disable_cb(vq);
>> >> >> + while ((cmd = virtqueue_get_buf(vq, &len)))
>> >> >> + complete(&cmd->completion);
>> >> >> + } while (!virtqueue_enable_cb(vq));
>> >> >> + spin_unlock_irqrestore(&admin_vq->lock, flags);
>> >> >> +}
>> >> >> +
>> >> >> static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
>> >> >> u16 opcode,
>> >> >> struct scatterlist **sgs,
>> >> >> @@ -61,7 +78,8 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
>> >> >> struct virtio_admin_cmd *cmd)
>> >> >> {
>> >> >> struct virtqueue *vq;
>> >> >> - int ret, len;
>> >> >> + unsigned long flags;
>> >> >> + int ret;
>> >> >>
>> >> >> vq = admin_vq->info->vq;
>> >> >> if (!vq)
>> >> >> @@ -72,21 +90,33 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
>> >> >> !((1ULL << opcode) & admin_vq->supported_cmds))
>> >> >> return -EOPNOTSUPP;
>> >> >>
>> >> >> - ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
>> >> >> - if (ret < 0)
>> >> >> - return -EIO;
>> >> >> + init_completion(&cmd->completion);
>> >> >>
>> >> >> - if (unlikely(!virtqueue_kick(vq)))
>> >> >> +again:
>> >> >> + if (virtqueue_is_broken(vq))
>> >> >> return -EIO;
>> >> >>
>> >> >> - while (!virtqueue_get_buf(vq, &len) &&
>> >> >> - !virtqueue_is_broken(vq))
>> >> >> - cpu_relax();
>> >> >> + spin_lock_irqsave(&admin_vq->lock, flags);
>> >> >> + ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
>> >> >> + if (ret < 0) {
>> >> >> + if (ret == -ENOSPC) {
>> >> >> + spin_unlock_irqrestore(&admin_vq->lock, flags);
>> >> >> + cpu_relax();
>> >> >> + goto again;
>> >> >> + }
>> >> >> + goto unlock_err;
>> >> >> + }
>> >> >> + if (!virtqueue_kick(vq))
>> >> >> + goto unlock_err;
>> >> >> + spin_unlock_irqrestore(&admin_vq->lock, flags);
>> >> >>
>> >> >> - if (virtqueue_is_broken(vq))
>> >> >> - return -EIO;
>> >> >> + wait_for_completion(&cmd->completion);
>> >> >>
>> >> >> - return 0;
>> >> >> + return cmd->ret;
>> >> >> +
>> >> >> +unlock_err:
>> >> >> + spin_unlock_irqrestore(&admin_vq->lock, flags);
>> >> >> + return -EIO;
>> >> >> }
>> >> >>
>> >> >> int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
>> >> >> @@ -209,6 +239,25 @@ static void vp_modern_avq_activate(struct virtio_device *vdev)
>> >> >> virtio_pci_admin_cmd_list_init(vdev);
>> >> >> }
>> >> >>
>> >> >> +static void vp_modern_avq_cleanup(struct virtio_device *vdev)
>> >> >> +{
>> >> >> + struct virtio_pci_device *vp_dev = to_vp_device(vdev);
>> >> >> + struct virtio_admin_cmd *cmd;
>> >> >> + struct virtqueue *vq;
>> >> >> +
>> >> >> + if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
>> >> >> + return;
>> >> >> +
>> >> >> + vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq;
>> >> >> + if (!vq)
>> >> >> + return;
>> >> >> +
>> >> >> + while ((cmd = virtqueue_detach_unused_buf(vq))) {
>> >> >> + cmd->ret = -EIO;
>> >> >> + complete(&cmd->completion);
>> >> >> + }
>> >> >> +}
>> >> >> +
>> >> >
>> >> >
>> >> >I think surprise removal is still broken with this.
>> >>
>> >> Why do you think so? I will drain the remaining buffers from the queue
>> >> and complete them all. What's missing?
>> >
>> >You have this:
>> >
>> >> >> + wait_for_completion(&cmd->completion);
>> >
>> >if surprise removal triggers after you submitted command
>> >but before it was used, no callback triggers and so no
>> >completion either.
>>
>> Well. In that case
>>
>> virtio_pci_remove()
>> -> unregister_virtio_device()
>> -> device_unregister()
>> -> virtnet_remove()
>> -> remove_vq_common()
>> -> virtio_reset_device()
>> -> vp_reset()
>> -> vp_modern_avq_cleanup()
>> which goes over these cmds and does complete
>> them all. See vp_modern_avq_cleanup() above.
>>
>> What am I missing?
>
>Oh, you are right. Won't work for cvq but that's
>a separate issue.
True, for cvq, this will need to be handled in:
remove_vq_common(), similar to free_unused_bufs()
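Roughly, the shape such handling could take (hypothetical sketch only,
assuming cvq commands were tracked with a per-command context that
carries a completion, which virtio_net does not do today):

/* hypothetical: fail any ctrl commands still queued at remove time */
static void virtnet_fail_pending_ctrl_cmds(struct virtqueue *cvq)
{
        struct ctrl_cmd_ctx {           /* hypothetical per-command context */
                struct completion completion;
                int ret;
        } *ctx;

        while ((ctx = virtqueue_detach_unused_buf(cvq))) {
                ctx->ret = -EIO;
                complete(&ctx->completion);
        }
}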
>
>
>>
>> >
>> >
>> >>
>> >> >You need to add a callback and signal the completion.
>> >>
>> >> Callback to what?
>> >
>> >for example, remove callback can signal completion.
>> >
>> >
>> >>
>> >>
>> >>
>> >> >
>> >> >--
>> >> >MST
>> >> >
>> >
>
^ permalink raw reply [flat|nested] 24+ messages in thread
* [PATCH virtio v2 13/13] virtio_pci_modern: remove admin queue serialization lock
2024-07-10 6:35 [PATCH virtio v2 00/13] virtio_pci_modern: allow parallel admin queue commands execution Jiri Pirko
` (11 preceding siblings ...)
2024-07-10 6:36 ` [PATCH virtio v2 12/13] virtio_pci_modern: use completion instead of busy loop to wait on admin cmd result Jiri Pirko
@ 2024-07-10 6:36 ` Jiri Pirko
12 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-07-10 6:36 UTC (permalink / raw)
To: virtualization; +Cc: mst, jasowang, xuanzhuo, eperezma, parav, feliu, hengqi
From: Jiri Pirko <jiri@nvidia.com>
The admin queue operations are protected by the newly introduced
spin lock. To make it possible to issue commands in parallel, remove
the admin queue serialization lock.
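With the serialization lock gone, callers may block in
vp_modern_admin_cmd_exec() concurrently, each waiting only for its own
command's completion. A rough caller sketch (hypothetical wrapper,
assuming the driver-internal virtio_pci_common.h context; only the
fields used here are filled in):

/* hypothetical wrapper: several of these can now run in parallel */
static int send_admin_cmd_sketch(struct virtio_device *vdev, u16 opcode,
                                 u64 member_id,
                                 struct scatterlist *data_sg,
                                 struct scatterlist *result_sg)
{
        struct virtio_admin_cmd cmd = {
                .opcode = cpu_to_le16(opcode),
                .group_member_id = cpu_to_le64(member_id),
                .data_sg = data_sg,
                .result_sg = result_sg,
        };

        return vp_modern_admin_cmd_exec(vdev, &cmd);
}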
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
---
v1->v2:
- rebased on top of changes in previous patches (vq info)
---
drivers/virtio/virtio_pci_common.h | 2 --
drivers/virtio/virtio_pci_modern.c | 5 -----
2 files changed, 7 deletions(-)
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 90df381fbbcf..1d9c49947f52 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -45,8 +45,6 @@ struct virtio_pci_vq_info {
struct virtio_pci_admin_vq {
/* Virtqueue info associated with this admin queue. */
struct virtio_pci_vq_info *info;
- /* serializing admin commands execution. */
- struct mutex cmd_lock;
/* Protects virtqueue access. */
spinlock_t lock;
u64 supported_cmds;
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 608df3263df1..9193c30d640a 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -167,12 +167,9 @@ int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
in_num++;
}
- mutex_lock(&vp_dev->admin_vq.cmd_lock);
ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq,
le16_to_cpu(cmd->opcode),
sgs, out_num, in_num, cmd);
- mutex_unlock(&vp_dev->admin_vq.cmd_lock);
-
if (ret) {
dev_err(&vdev->dev,
"Failed to execute command on admin vq: %d\n.", ret);
@@ -837,7 +834,6 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
vp_dev->vdev.id = mdev->id;
spin_lock_init(&vp_dev->admin_vq.lock);
- mutex_init(&vp_dev->admin_vq.cmd_lock);
return 0;
}
@@ -845,6 +841,5 @@ void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
- mutex_destroy(&vp_dev->admin_vq.cmd_lock);
vp_modern_remove(mdev);
}
--
2.45.2
^ permalink raw reply related [flat|nested] 24+ messages in thread