* [PATCH V3 0/4] vDPA/ifcvf: implement shared IRQ feature
@ 2022-01-26 12:49 Zhu Lingshan
2022-01-26 12:49 ` [PATCH V3 1/4] vDPA/ifcvf: implement IO read/write helpers in the header file Zhu Lingshan
` (4 more replies)
0 siblings, 5 replies; 8+ messages in thread
From: Zhu Lingshan @ 2022-01-26 12:49 UTC (permalink / raw)
To: mst, jasowang; +Cc: netdev, virtualization, Zhu Lingshan
It has been observed that on some platforms/devices, there may
not be enough MSI vectors for virtqueues and the config change.
Under such circumstances, the interrupt sources of a device
have to share vectors/IRQs.
This series implemented a shared IRQ feature for ifcvf.
Please help review.
Changes from V2:
(1) Fix misuse of nvectors(in ifcvf_alloc_vectors return value)(Michael)
(2) Fix misuse of irq = get_vq_irq() in setup irqbypass(Michael)
(3) Coding style improvements(Michael)
(4) Better naming of device shared irq/shared vq irq
Changes from V1:
(1) Enable config interrupt when only one vector is allocated(Michael)
(2) Clean vectors/IRQs if failed to request config interrupt
since config interrupt is a must(Michael)
(3) Keep local vdpa_ops, disable irq_bypass by setting IRQ = -EINVAL
for shared IRQ case(Michael)
(4) Improvements on error messages(Michael)
(5) Squash functions implementation patches to the callers(Michael)
Zhu Lingshan (4):
vDPA/ifcvf: implement IO read/write helpers in the header file
vDPA/ifcvf: implement device MSIX vector allocator
vhost_vdpa: don't setup irq offloading when irq_num < 0
vDPA/ifcvf: implement shared IRQ feature
drivers/vdpa/ifcvf/ifcvf_base.c | 67 +++-----
drivers/vdpa/ifcvf/ifcvf_base.h | 60 +++++++-
drivers/vdpa/ifcvf/ifcvf_main.c | 260 ++++++++++++++++++++++++++++----
drivers/vhost/vdpa.c | 4 +
4 files changed, 312 insertions(+), 79 deletions(-)
--
2.27.0
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH V3 1/4] vDPA/ifcvf: implement IO read/write helpers in the header file
2022-01-26 12:49 [PATCH V3 0/4] vDPA/ifcvf: implement shared IRQ feature Zhu Lingshan
@ 2022-01-26 12:49 ` Zhu Lingshan
2022-01-26 12:49 ` [PATCH V3 2/4] vDPA/ifcvf: implement device MSIX vector allocator Zhu Lingshan
` (3 subsequent siblings)
4 siblings, 0 replies; 8+ messages in thread
From: Zhu Lingshan @ 2022-01-26 12:49 UTC (permalink / raw)
To: mst, jasowang; +Cc: netdev, virtualization, Zhu Lingshan
Re-implement the IO read/write helpers in the header file, so that
they can be shared among modules.
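As an illustration (not part of this patch), any file that includes
ifcvf_base.h can then use the helpers directly; a minimal hypothetical
caller, assuming the existing struct ifcvf_hw of this driver, could look
like:
    /* hypothetical example: select a queue and read back its size */
    static u16 example_read_vq_size(struct ifcvf_hw *hw, u16 qid)
    {
            struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

            ifc_iowrite16(qid, &cfg->queue_select);
            return ifc_ioread16(&cfg->queue_size);
    }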
Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
---
drivers/vdpa/ifcvf/ifcvf_base.c | 36 --------------------------------
drivers/vdpa/ifcvf/ifcvf_base.h | 37 +++++++++++++++++++++++++++++++++
2 files changed, 37 insertions(+), 36 deletions(-)
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 7d41dfe48ade..397692ae671c 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -10,42 +10,6 @@
#include "ifcvf_base.h"
-static inline u8 ifc_ioread8(u8 __iomem *addr)
-{
- return ioread8(addr);
-}
-static inline u16 ifc_ioread16 (__le16 __iomem *addr)
-{
- return ioread16(addr);
-}
-
-static inline u32 ifc_ioread32(__le32 __iomem *addr)
-{
- return ioread32(addr);
-}
-
-static inline void ifc_iowrite8(u8 value, u8 __iomem *addr)
-{
- iowrite8(value, addr);
-}
-
-static inline void ifc_iowrite16(u16 value, __le16 __iomem *addr)
-{
- iowrite16(value, addr);
-}
-
-static inline void ifc_iowrite32(u32 value, __le32 __iomem *addr)
-{
- iowrite32(value, addr);
-}
-
-static void ifc_iowrite64_twopart(u64 val,
- __le32 __iomem *lo, __le32 __iomem *hi)
-{
- ifc_iowrite32((u32)val, lo);
- ifc_iowrite32(val >> 32, hi);
-}
-
struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
{
return container_of(hw, struct ifcvf_adapter, vf);
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index c486873f370a..949b4fb9d554 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -42,6 +42,43 @@
#define ifcvf_private_to_vf(adapter) \
(&((struct ifcvf_adapter *)adapter)->vf)
+static inline u8 ifc_ioread8(u8 __iomem *addr)
+{
+ return ioread8(addr);
+}
+
+static inline u16 ifc_ioread16(__le16 __iomem *addr)
+{
+ return ioread16(addr);
+}
+
+static inline u32 ifc_ioread32(__le32 __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static inline void ifc_iowrite8(u8 value, u8 __iomem *addr)
+{
+ iowrite8(value, addr);
+}
+
+static inline void ifc_iowrite16(u16 value, __le16 __iomem *addr)
+{
+ iowrite16(value, addr);
+}
+
+static inline void ifc_iowrite32(u32 value, __le32 __iomem *addr)
+{
+ iowrite32(value, addr);
+}
+
+static inline void ifc_iowrite64_twopart(u64 val,
+ __le32 __iomem *lo, __le32 __iomem *hi)
+{
+ ifc_iowrite32((u32)val, lo);
+ ifc_iowrite32(val >> 32, hi);
+}
+
struct vring_info {
u64 desc;
u64 avail;
--
2.27.0
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH V3 2/4] vDPA/ifcvf: implement device MSIX vector allocator
2022-01-26 12:49 [PATCH V3 0/4] vDPA/ifcvf: implement shared IRQ feature Zhu Lingshan
2022-01-26 12:49 ` [PATCH V3 1/4] vDPA/ifcvf: implement IO read/write helpers in the header file Zhu Lingshan
@ 2022-01-26 12:49 ` Zhu Lingshan
2022-01-26 12:49 ` [PATCH V3 3/4] vhost_vdpa: don't setup irq offloading when irq_num < 0 Zhu Lingshan
` (2 subsequent siblings)
4 siblings, 0 replies; 8+ messages in thread
From: Zhu Lingshan @ 2022-01-26 12:49 UTC (permalink / raw)
To: mst, jasowang; +Cc: netdev, virtualization, Zhu Lingshan
This commit implements an MSIX vector allocation helper
for the virtqueues and the config interrupt.
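For illustration, a hedged sketch of how a caller is expected to consume
the helper's return value (ifcvf_alloc_vectors() is from this patch; the
surrounding function is hypothetical):
    static int example_request_irq(struct ifcvf_adapter *adapter)
    {
            struct ifcvf_hw *vf = &adapter->vf;
            int nvectors;

            nvectors = ifcvf_alloc_vectors(adapter);
            if (nvectors <= 0)
                    return nvectors; /* negative errno from pci_alloc_irq_vectors() */

            /* nvectors is in [1, vf->nr_vring + 1]; anything less than
             * vf->nr_vring + 1 means some interrupt sources must share.
             */
            return 0;
    }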
Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
---
drivers/vdpa/ifcvf/ifcvf_main.c | 35 +++++++++++++++++++++++++++++++--
1 file changed, 33 insertions(+), 2 deletions(-)
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index d1a6b5ab543c..44c89ab0b6da 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -58,14 +58,45 @@ static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
ifcvf_free_irq_vectors(pdev);
}
+/* ifcvf MSIX vectors allocator: this helper tries to allocate
+ * vectors for all virtqueues and the config interrupt.
+ * It returns the number of allocated vectors, or a negative
+ * value on failure.
+ */
+static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ifcvf_hw *vf = &adapter->vf;
+ int max_intr, ret;
+
+ /* all queues and config interrupt */
+ max_intr = vf->nr_vring + 1;
+ ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+
+ if (ret < 0) {
+ IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
+ return ret;
+ }
+
+ if (ret < max_intr)
+ IFCVF_INFO(pdev,
+ "Requested %u vectors, however only %u allocated, lower performance\n",
+ max_intr, ret);
+
+ return ret;
+}
+
static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct ifcvf_hw *vf = &adapter->vf;
- int vector, i, ret, irq;
+ int vector, nvectors, i, ret, irq;
u16 max_intr;
- /* all queues and config interrupt */
+ nvectors = ifcvf_alloc_vectors(adapter);
+ if (!(nvectors > 0))
+ return nvectors;
+
max_intr = vf->nr_vring + 1;
ret = pci_alloc_irq_vectors(pdev, max_intr,
--
2.27.0
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH V3 3/4] vhost_vdpa: don't setup irq offloading when irq_num < 0
2022-01-26 12:49 [PATCH V3 0/4] vDPA/ifcvf: implement shared IRQ feature Zhu Lingshan
2022-01-26 12:49 ` [PATCH V3 1/4] vDPA/ifcvf: implement IO read/write helpers in the header file Zhu Lingshan
2022-01-26 12:49 ` [PATCH V3 2/4] vDPA/ifcvf: implement device MSIX vector allocator Zhu Lingshan
@ 2022-01-26 12:49 ` Zhu Lingshan
2022-01-26 12:49 ` [PATCH V3 4/4] vDPA/ifcvf: implement shared IRQ feature Zhu Lingshan
2022-01-26 14:14 ` [PATCH V3 0/4] " Michael S. Tsirkin
4 siblings, 0 replies; 8+ messages in thread
From: Zhu Lingshan @ 2022-01-26 12:49 UTC (permalink / raw)
To: mst, jasowang; +Cc: netdev, virtualization, Zhu Lingshan
When the irq number is negative (e.g., -EINVAL), the virtqueue
may be disabled, or the virtqueues may be sharing a device irq.
In such a case, we should not set up irq offloading for the virtqueue.
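As a sketch of the driver-side contract this check relies on (patch 4
implements it for ifcvf; the names below are illustrative only):
    /* hypothetical .get_vq_irq implementation: return -EINVAL instead of
     * an IRQ number when the vq has no dedicated interrupt, so that
     * vhost_vdpa_setup_vq_irq() skips irq offloading for it.
     */
    static int example_get_vq_irq(struct vdpa_device *vdpa_dev, u16 qid)
    {
            struct example_hw *hw = example_vdpa_to_hw(vdpa_dev);

            if (hw->vqs_shared_irq >= 0)
                    return -EINVAL;

            return hw->vring[qid].irq;
    }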
Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
---
drivers/vhost/vdpa.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 851539807bc9..c4fcacb0de3a 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -96,6 +96,10 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
if (!ops->get_vq_irq)
return;
+ irq = ops->get_vq_irq(vdpa, qid);
+ if (irq < 0)
+ return;
+
irq = ops->get_vq_irq(vdpa, qid);
irq_bypass_unregister_producer(&vq->call_ctx.producer);
if (!vq->call_ctx.ctx || irq < 0)
--
2.27.0
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH V3 4/4] vDPA/ifcvf: implement shared IRQ feature
2022-01-26 12:49 [PATCH V3 0/4] vDPA/ifcvf: implement shared IRQ feature Zhu Lingshan
` (2 preceding siblings ...)
2022-01-26 12:49 ` [PATCH V3 3/4] vhost_vdpa: don't setup irq offloading when irq_num < 0 Zhu Lingshan
@ 2022-01-26 12:49 ` Zhu Lingshan
2022-01-26 14:14 ` [PATCH V3 0/4] " Michael S. Tsirkin
4 siblings, 0 replies; 8+ messages in thread
From: Zhu Lingshan @ 2022-01-26 12:49 UTC (permalink / raw)
To: mst, jasowang; +Cc: netdev, virtualization, Zhu Lingshan
On some platforms/devices, there may not be enough MSI vectors
allocated for the virtqueues and config changes. In such a case,
the interrupt sources (virtqueues, config changes) must share
an IRQ/vector, to avoid initialization failures and keep
the device functional.
This commit handles three cases:
(1) The number of allocated vectors equals the number of virtqueues + 1
(the extra one is for config changes): every virtqueue and the config
interrupt gets a separate vector/IRQ. This is the best and most
likely case.
(2) The number of allocated vectors is less than the best case but
greater than 1: all virtqueues share one vector/IRQ, and the config
interrupt gets a separate vector/IRQ.
(3) Only one vector is allocated: the virtqueues and the config
interrupt share a single vector/IRQ. This is the worst and least
likely case.
Otherwise (no vector at all), the driver has to fail.
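A minimal sketch of this case selection, assuming nvectors is the value
returned by ifcvf_alloc_vectors() (the macro and field names match this
patch; the layout is only illustrative):
    nvectors = ifcvf_alloc_vectors(adapter);
    if (nvectors <= 0)
            return nvectors;                /* not even one vector, fail */

    if (nvectors == vf->nr_vring + 1)
            vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;    /* case (1) */
    else if (nvectors > 1)
            vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG; /* case (2) */
    else
            vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;           /* case (3) */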
This commit introduces helper functions ifcvf_set_vq_vector() and
ifcvf_set_config_vector(), which set the virtqueue vector and the
config vector in the device config space, so that the device can
deliver interrupts through the assigned vectors.
This commit also adds some fields to struct ifcvf_hw and rearranges
the existing fields for better cacheline alignment.
Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
---
drivers/vdpa/ifcvf/ifcvf_base.c | 47 +++++--
drivers/vdpa/ifcvf/ifcvf_base.h | 23 ++-
drivers/vdpa/ifcvf/ifcvf_main.c | 241 +++++++++++++++++++++++++++-----
3 files changed, 254 insertions(+), 57 deletions(-)
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 397692ae671c..18dcb63ab1e3 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -15,6 +15,36 @@ struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
return container_of(hw, struct ifcvf_adapter, vf);
}
+int ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+ struct ifcvf_adapter *ifcvf = vf_to_adapter(hw);
+
+ ifc_iowrite16(qid, &cfg->queue_select);
+ ifc_iowrite16(vector, &cfg->queue_msix_vector);
+ if (ifc_ioread16(&cfg->queue_msix_vector) == VIRTIO_MSI_NO_VECTOR) {
+ IFCVF_ERR(ifcvf->pdev, "No msix vector for queue %u\n", qid);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+ struct ifcvf_adapter *ifcvf = vf_to_adapter(hw);
+
+ cfg = hw->common_cfg;
+ ifc_iowrite16(vector, &cfg->msix_config);
+ if (ifc_ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
+ IFCVF_ERR(ifcvf->pdev, "No msix vector for device config\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
struct virtio_pci_cap *cap)
{
@@ -140,6 +170,8 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
hw->common_cfg, hw->notify_base, hw->isr,
hw->dev_cfg, hw->notify_off_multiplier);
+ hw->vqs_shared_irq = -EINVAL;
+
return 0;
}
@@ -321,12 +353,6 @@ static int ifcvf_hw_enable(struct ifcvf_hw *hw)
ifcvf = vf_to_adapter(hw);
cfg = hw->common_cfg;
- ifc_iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);
-
- if (ifc_ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
- IFCVF_ERR(ifcvf->pdev, "No msix vector for device config\n");
- return -EINVAL;
- }
for (i = 0; i < hw->nr_vring; i++) {
if (!hw->vring[i].ready)
@@ -340,15 +366,6 @@ static int ifcvf_hw_enable(struct ifcvf_hw *hw)
ifc_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
&cfg->queue_used_hi);
ifc_iowrite16(hw->vring[i].size, &cfg->queue_size);
- ifc_iowrite16(i + IFCVF_MSI_QUEUE_OFF, &cfg->queue_msix_vector);
-
- if (ifc_ioread16(&cfg->queue_msix_vector) ==
- VIRTIO_MSI_NO_VECTOR) {
- IFCVF_ERR(ifcvf->pdev,
- "No msix vector for queue %u\n", i);
- return -EINVAL;
- }
-
ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
ifc_iowrite16(1, &cfg->queue_enable);
}
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 949b4fb9d554..9cfe088c82e9 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -27,8 +27,6 @@
#define IFCVF_QUEUE_ALIGNMENT PAGE_SIZE
#define IFCVF_QUEUE_MAX 32768
-#define IFCVF_MSI_CONFIG_OFF 0
-#define IFCVF_MSI_QUEUE_OFF 1
#define IFCVF_PCI_MAX_RESOURCE 6
#define IFCVF_LM_CFG_SIZE 0x40
@@ -42,6 +40,13 @@
#define ifcvf_private_to_vf(adapter) \
(&((struct ifcvf_adapter *)adapter)->vf)
+/* every vq and the config interrupt has its own vector */
+#define MSIX_VECTOR_PER_VQ_AND_CONFIG 1
+/* all vqs share a vector, and config interrupt has a separate vector */
+#define MSIX_VECTOR_SHARED_VQ_AND_CONFIG 2
+/* all vqs and config interrupt share a vector */
+#define MSIX_VECTOR_DEV_SHARED 3
+
static inline u8 ifc_ioread8(u8 __iomem *addr)
{
return ioread8(addr);
@@ -97,25 +102,27 @@ struct ifcvf_hw {
u8 __iomem *isr;
/* Live migration */
u8 __iomem *lm_cfg;
- u16 nr_vring;
/* Notification bar number */
u8 notify_bar;
+ u8 msix_vector_status;
+ /* virtio-net or virtio-blk device config size */
+ u32 config_size;
/* Notificaiton bar address */
void __iomem *notify_base;
phys_addr_t notify_base_pa;
u32 notify_off_multiplier;
+ u32 dev_type;
u64 req_features;
u64 hw_features;
- u32 dev_type;
struct virtio_pci_common_cfg __iomem *common_cfg;
void __iomem *dev_cfg;
struct vring_info vring[IFCVF_MAX_QUEUES];
void __iomem * const *base;
char config_msix_name[256];
struct vdpa_callback config_cb;
- unsigned int config_irq;
- /* virtio-net or virtio-blk device config size */
- u32 config_size;
+ int config_irq;
+ int vqs_shared_irq;
+ u16 nr_vring;
};
struct ifcvf_adapter {
@@ -160,4 +167,6 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
+int ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
+int ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector);
#endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 44c89ab0b6da..53f0033a018a 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -17,6 +17,7 @@
#define DRIVER_AUTHOR "Intel Corporation"
#define IFCVF_DRIVER_NAME "ifcvf"
+/* handles config interrupt */
static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
struct ifcvf_hw *vf = arg;
@@ -27,6 +28,7 @@ static irqreturn_t ifcvf_config_changed(int irq, void *arg)
return IRQ_HANDLED;
}
+/* handles vqs interrupt */
static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
{
struct vring_info *vring = arg;
@@ -37,24 +39,77 @@ static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+/* handles vqs shared interrupt */
+static irqreturn_t ifcvf_vq_shared_intr_handler(int irq, void *arg)
+{
+ struct ifcvf_hw *vf = arg;
+ struct vring_info *vring;
+ int i;
+
+ for (i = 0; i < vf->nr_vring; i++) {
+ vring = &vf->vring[i];
+ if (vring->cb.callback)
+ vring->cb.callback(vring->cb.private);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* handles a shared interrupt for vqs and config */
+static irqreturn_t ifcvf_dev_shared_intr_handler(int irq, void *arg)
+{
+ struct ifcvf_hw *vf = arg;
+ u8 isr;
+
+ isr = ifc_ioread8(vf->isr);
+ if (isr & VIRTIO_PCI_ISR_CONFIG)
+ ifcvf_config_changed(irq, arg);
+
+ return ifcvf_vq_shared_intr_handler(irq, arg);
+}
+
static void ifcvf_free_irq_vectors(void *data)
{
pci_free_irq_vectors(data);
}
-static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
+static void ifcvf_free_vq_irq(struct ifcvf_adapter *adapter, int queues)
{
struct pci_dev *pdev = adapter->pdev;
struct ifcvf_hw *vf = &adapter->vf;
int i;
+ if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG) {
+ for (i = 0; i < queues; i++) {
+ devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
+ vf->vring[i].irq = -EINVAL;
+ }
+ } else {
+ devm_free_irq(&pdev->dev, vf->vqs_shared_irq, vf);
+ vf->vqs_shared_irq = -EINVAL;
+ }
+}
- for (i = 0; i < queues; i++) {
- devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
- vf->vring[i].irq = -EINVAL;
+static void ifcvf_free_config_irq(struct ifcvf_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ifcvf_hw *vf = &adapter->vf;
+
+ /* If the irq is shared by all vqs and the config interrupt,
+ * it is already freed in ifcvf_free_vq_irq, so here we only need
+ * to free the config irq when msix_vector_status != MSIX_VECTOR_DEV_SHARED
+ */
+ if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) {
+ devm_free_irq(&pdev->dev, vf->config_irq, vf);
+ vf->config_irq = -EINVAL;
}
+}
+
+static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
+{
+ struct pci_dev *pdev = adapter->pdev;
- devm_free_irq(&pdev->dev, vf->config_irq, vf);
+ ifcvf_free_vq_irq(adapter, queues);
+ ifcvf_free_config_irq(adapter);
ifcvf_free_irq_vectors(pdev);
}
@@ -86,58 +141,171 @@ static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter)
return ret;
}
-static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
+static int ifcvf_request_per_vq_irq(struct ifcvf_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct ifcvf_hw *vf = &adapter->vf;
- int vector, nvectors, i, ret, irq;
- u16 max_intr;
+ int i, vector, ret, irq;
- nvectors = ifcvf_alloc_vectors(adapter);
- if (!(nvectors > 0))
- return nvectors;
+ for (i = 0; i < vf->nr_vring; i++) {
+ snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", pci_name(pdev), i);
+ vector = i;
+ irq = pci_irq_vector(pdev, vector);
+ ret = devm_request_irq(&pdev->dev, irq,
+ ifcvf_intr_handler, 0,
+ vf->vring[i].msix_name,
+ &vf->vring[i]);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i);
+ ifcvf_free_vq_irq(adapter, i);
+ return ret;
+ } else {
+ vf->vring[i].irq = irq;
+ ifcvf_set_vq_vector(vf, i, vector);
+ }
+ }
- max_intr = vf->nr_vring + 1;
+ vf->vqs_shared_irq = -EINVAL;
+
+ return 0;
+}
+
+static int ifcvf_request_shared_vq_irq(struct ifcvf_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ifcvf_hw *vf = &adapter->vf;
+ int i, vector, ret, irq;
+
+ vector = 0;
+ /* reuse msix_name[256] space of vring0 to store shared vqs interrupt name */
+ snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-vqs-shared-irq\n", pci_name(pdev));
+ irq = pci_irq_vector(pdev, vector);
+ ret = devm_request_irq(&pdev->dev, irq,
+ ifcvf_vq_shared_intr_handler, 0,
+ vf->vring[0].msix_name, vf);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request shared irq for vf\n");
+
+ return ret;
+ }
+
+ vf->vqs_shared_irq = irq;
+ for (i = 0; i < vf->nr_vring; i++) {
+ vf->vring[i].irq = -EINVAL;
+ ifcvf_set_vq_vector(vf, i, vector);
+ }
+
+ return 0;
+
+}
+
+static int ifcvf_request_dev_shared_irq(struct ifcvf_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ifcvf_hw *vf = &adapter->vf;
+ int i, vector, ret, irq;
+
+ vector = 0;
+ /* reuse msix_name[256] space of vring0 to store shared device interrupt name */
+ snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-dev-shared-irq\n", pci_name(pdev));
+ irq = pci_irq_vector(pdev, vector);
+ ret = devm_request_irq(&pdev->dev, irq,
+ ifcvf_dev_shared_intr_handler, 0,
+ vf->vring[0].msix_name, vf);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request shared irq for vf\n");
- ret = pci_alloc_irq_vectors(pdev, max_intr,
- max_intr, PCI_IRQ_MSIX);
- if (ret < 0) {
- IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
return ret;
}
+ vf->vqs_shared_irq = irq;
+ for (i = 0; i < vf->nr_vring; i++) {
+ vf->vring[i].irq = -EINVAL;
+ ifcvf_set_vq_vector(vf, i, vector);
+ }
+
+ vf->config_irq = irq;
+ ifcvf_set_config_vector(vf, vector);
+
+ return 0;
+
+}
+
+static int ifcvf_request_vq_irq(struct ifcvf_adapter *adapter)
+{
+ struct ifcvf_hw *vf = &adapter->vf;
+ int ret;
+
+ if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+ ret = ifcvf_request_per_vq_irq(adapter);
+ else
+ ret = ifcvf_request_shared_vq_irq(adapter);
+
+ return ret;
+}
+
+static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ifcvf_hw *vf = &adapter->vf;
+ int config_vector, ret;
+
+ if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
+ return 0;
+
+ if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+ /* vectors 0 ~ vf->nr_vring - 1 are for vqs, vector vf->nr_vring is for the config interrupt */
+ config_vector = vf->nr_vring;
+
+ if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
+ /* vector 0 for vqs and 1 for config interrupt */
+ config_vector = 1;
+
snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
pci_name(pdev));
- vector = 0;
- vf->config_irq = pci_irq_vector(pdev, vector);
+ vf->config_irq = pci_irq_vector(pdev, config_vector);
ret = devm_request_irq(&pdev->dev, vf->config_irq,
ifcvf_config_changed, 0,
vf->config_msix_name, vf);
if (ret) {
IFCVF_ERR(pdev, "Failed to request config irq\n");
+ ifcvf_free_vq_irq(adapter, vf->nr_vring);
return ret;
}
+ ifcvf_set_config_vector(vf, config_vector);
- for (i = 0; i < vf->nr_vring; i++) {
- snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
- pci_name(pdev), i);
- vector = i + IFCVF_MSI_QUEUE_OFF;
- irq = pci_irq_vector(pdev, vector);
- ret = devm_request_irq(&pdev->dev, irq,
- ifcvf_intr_handler, 0,
- vf->vring[i].msix_name,
- &vf->vring[i]);
- if (ret) {
- IFCVF_ERR(pdev,
- "Failed to request irq for vq %d\n", i);
- ifcvf_free_irq(adapter, i);
+ return 0;
+}
- return ret;
- }
+static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
+{
+ struct ifcvf_hw *vf = &adapter->vf;
+ int nvectors, ret, max_intr;
- vf->vring[i].irq = irq;
+ nvectors = ifcvf_alloc_vectors(adapter);
+ if (!(nvectors > 0))
+ return nvectors;
+
+ vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;
+ max_intr = vf->nr_vring + 1;
+ if (nvectors < max_intr)
+ vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG;
+
+ if (nvectors == 1) {
+ vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
+ ret = ifcvf_request_dev_shared_irq(adapter);
+
+ return ret;
}
+ ret = ifcvf_request_vq_irq(adapter);
+ if (ret)
+ return ret;
+
+ ret = ifcvf_request_config_irq(adapter);
+
+ if (ret)
+ return ret;
+
return 0;
}
@@ -441,7 +609,10 @@ static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- return vf->vring[qid].irq;
+ if (vf->vqs_shared_irq < 0)
+ return vf->vring[qid].irq;
+ else
+ return -EINVAL;
}
static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
--
2.27.0
^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [PATCH V3 0/4] vDPA/ifcvf: implement shared IRQ feature
2022-01-26 12:49 [PATCH V3 0/4] vDPA/ifcvf: implement shared IRQ feature Zhu Lingshan
` (3 preceding siblings ...)
2022-01-26 12:49 ` [PATCH V3 4/4] vDPA/ifcvf: implement shared IRQ feature Zhu Lingshan
@ 2022-01-26 14:14 ` Michael S. Tsirkin
2022-01-27 4:31 ` Zhu, Lingshan
4 siblings, 1 reply; 8+ messages in thread
From: Michael S. Tsirkin @ 2022-01-26 14:14 UTC (permalink / raw)
To: Zhu Lingshan; +Cc: jasowang, netdev, virtualization
On Wed, Jan 26, 2022 at 08:49:08PM +0800, Zhu Lingshan wrote:
> It has been observed that on some platforms/devices, there may
> not be enough MSI vectors for virtqueues and the config change.
> Under such circumstances, the interrupt sources of a device
> have to share vectors/IRQs.
>
> This series implemented a shared IRQ feature for ifcvf.
>
> Please help review.
Given the history, can you please report which tests
were performed with this patchset? Which configs tested?
Thanks?
> Changes from V2:
> (1) Fix misuse of nvectors(in ifcvf_alloc_vectors return value)(Michael)
> (2) Fix misuse of irq = get_vq_irq() in setup irqbypass(Michael)
> (3) Coding style improvements(Michael)
> (4) Better naming of device shared irq/shared vq irq
>
> Changes from V1:
> (1) Enable config interrupt when only one vector is allocated(Michael)
> (2) Clean vectors/IRQs if failed to request config interrupt
> since config interrupt is a must(Michael)
> (3) Keep local vdpa_ops, disable irq_bypass by setting IRQ = -EINVAL
> for shared IRQ case(Michael)
> (4) Improvements on error messages(Michael)
> (5) Squash functions implementation patches to the callers(Michael)
>
> Zhu Lingshan (4):
> vDPA/ifcvf: implement IO read/write helpers in the header file
> vDPA/ifcvf: implement device MSIX vector allocator
> vhost_vdpa: don't setup irq offloading when irq_num < 0
> vDPA/ifcvf: implement shared IRQ feature
>
> drivers/vdpa/ifcvf/ifcvf_base.c | 67 +++-----
> drivers/vdpa/ifcvf/ifcvf_base.h | 60 +++++++-
> drivers/vdpa/ifcvf/ifcvf_main.c | 260 ++++++++++++++++++++++++++++----
> drivers/vhost/vdpa.c | 4 +
> 4 files changed, 312 insertions(+), 79 deletions(-)
>
> --
> 2.27.0
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH V3 0/4] vDPA/ifcvf: implement shared IRQ feature
2022-01-26 14:14 ` [PATCH V3 0/4] " Michael S. Tsirkin
@ 2022-01-27 4:31 ` Zhu, Lingshan
2022-01-27 8:27 ` Zhu, Lingshan
0 siblings, 1 reply; 8+ messages in thread
From: Zhu, Lingshan @ 2022-01-27 4:31 UTC (permalink / raw)
To: Michael S. Tsirkin; +Cc: jasowang, netdev, virtualization
On 1/26/2022 10:14 PM, Michael S. Tsirkin wrote:
> On Wed, Jan 26, 2022 at 08:49:08PM +0800, Zhu Lingshan wrote:
>> It has been observed that on some platforms/devices, there may
>> not be enough MSI vectors for virtqueues and the config change.
>> Under such circumstances, the interrupt sources of a device
>> have to share vectors/IRQs.
>>
>> This series implemented a shared IRQ feature for ifcvf.
>>
>> Please help review.
> Given the history, can you please report which tests
> were performed with this patchset? Which configs tested?
> Thanks?
Hi Michael,
The tests were ping and netperf. I set nvectors = 1 and 2 in
ifcvf_request_irq(), after ifcvf_alloc_vectors(),
to hard-code the number of allocated vectors.
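In other words, the test-only change was roughly the following
(illustrative, not part of the series):
    nvectors = ifcvf_alloc_vectors(adapter);
    if (!(nvectors > 0))
            return nvectors;

    /* test-only hack: force the shared-IRQ paths */
    nvectors = 1;   /* or 2 */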
Thanks,
Zhu Lingshan
>
>> Changes from V2:
>> (1) Fix misuse of nvectors(in ifcvf_alloc_vectors return value)(Michael)
>> (2) Fix misuse of irq = get_vq_irq() in setup irqbypass(Michael)
>> (3) Coding style improvements(Michael)
>> (4) Better naming of device shared irq/shared vq irq
>>
>> Changes from V1:
>> (1) Enable config interrupt when only one vector is allocated(Michael)
>> (2) Clean vectors/IRQs if failed to request config interrupt
>> since config interrupt is a must(Michael)
>> (3) Keep local vdpa_ops, disable irq_bypass by setting IRQ = -EINVAL
>> for shared IRQ case(Michael)
>> (4) Improvements on error messages(Michael)
>> (5) Squash functions implementation patches to the callers(Michael)
>>
>> Zhu Lingshan (4):
>> vDPA/ifcvf: implement IO read/write helpers in the header file
>> vDPA/ifcvf: implement device MSIX vector allocator
>> vhost_vdpa: don't setup irq offloading when irq_num < 0
>> vDPA/ifcvf: implement shared IRQ feature
>>
>> drivers/vdpa/ifcvf/ifcvf_base.c | 67 +++-----
>> drivers/vdpa/ifcvf/ifcvf_base.h | 60 +++++++-
>> drivers/vdpa/ifcvf/ifcvf_main.c | 260 ++++++++++++++++++++++++++++----
>> drivers/vhost/vdpa.c | 4 +
>> 4 files changed, 312 insertions(+), 79 deletions(-)
>>
>> --
>> 2.27.0
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH V3 0/4] vDPA/ifcvf: implement shared IRQ feature
2022-01-27 4:31 ` Zhu, Lingshan
@ 2022-01-27 8:27 ` Zhu, Lingshan
0 siblings, 0 replies; 8+ messages in thread
From: Zhu, Lingshan @ 2022-01-27 8:27 UTC (permalink / raw)
To: Michael S. Tsirkin; +Cc: jasowang, netdev, virtualization
On 1/27/2022 12:31 PM, Zhu, Lingshan wrote:
>
>
> On 1/26/2022 10:14 PM, Michael S. Tsirkin wrote:
>> On Wed, Jan 26, 2022 at 08:49:08PM +0800, Zhu Lingshan wrote:
>>> It has been observed that on some platforms/devices, there may
>>> not be enough MSI vectors for virtqueues and the config change.
>>> Under such circumstances, the interrupt sources of a device
>>> have to share vectors/IRQs.
>>>
>>> This series implemented a shared IRQ feature for ifcvf.
>>>
>>> Please help review.
>> Given the history, can you please report which tests
>> were performed with this patchset? Which configs tested?
>> Thanks?
> Hi Michael,
>
> The tests were ping and netperf. I set nvectors = 1 and 2 in
> ifcvf_request_irq(), after ifcvf_alloc_vectors(),
> to hard-code the number of allocated vectors.
>
> Thanks,
> Zhu Lingshan
We can verify the test results by checking the requested IRQs for the
two VMs (one vhost-vdpa device per VM).
(1) When setting nvectors = 1, only one IRQ is requested per VM/device; all
vqs and the config interrupt share this IRQ.
[lszhu@cra01infra01 ~]$ cat /proc/interrupts | grep ifc
241: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 45 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 534528-edge ifcvf[0000:01:00.5]-dev-shared-irq
251: 0 0 0 0 0 0 0 0 0 0 0 41 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 536576-edge ifcvf[0000:01:00.6]-dev-shared-irq
(2) When setting nvectors = 2, two IRQs are requested for each VM/device: one
for all vqs, the other for the config interrupt.
[lszhu@cra01infra01 ~]$ cat /proc/interrupts | grep ifc
241: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 39 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 534528-edge ifcvf[0000:01:00.5]-vqs-shared-irq
242: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 534529-edge ifcvf[0000:01:00.5]-config
251: 0 0 0 0 0 0 0 0 0 0 0 39 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 536576-edge ifcvf[0000:01:00.6]-vqs-shared-irq
252: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 536577-edge ifcvf[0000:01:00.6]-config
(3) When the nvectors hardcoding is removed, the driver allocates enough
vectors for all the queues and the config interrupt, and we do see better
performance because irq_bypass is enabled: (too many lines, cut off)
[lszhu@cra01infra01 linux]$ cat /proc/interrupts | grep ifc
241: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 534528-edge ifcvf[0000:01:00.5]-0
242: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 534529-edge ifcvf[0000:01:00.5]-1
243: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 534530-edge ifcvf[0000:01:00.5]-2
244: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 534531-edge ifcvf[0000:01:00.5]-3
245: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 534532-edge ifcvf[0000:01:00.5]-4
246: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 534533-edge ifcvf[0000:01:00.5]-5
247: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 534534-edge ifcvf[0000:01:00.5]-6
248: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 534535-edge ifcvf[0000:01:00.5]-7
249: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 534536-edge ifcvf[0000:01:00.5]-8
250: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 534537-edge ifcvf[0000:01:00.5]-config
251: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 536576-edge ifcvf[0000:01:00.6]-0
252: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 IR-PCI-MSI 536577-edge ifcvf[0000:01:00.6]-1
Thanks,
Zhu Lingshan
>>
>>> Changes from V2:
>>> (1) Fix misuse of nvectors(in ifcvf_alloc_vectors return
>>> value)(Michael)
>>> (2) Fix misuse of irq = get_vq_irq() in setup irqbypass(Michael)
>>> (3) Coding style improvements(Michael)
>>> (4) Better naming of device shared irq/shared vq irq
>>>
>>> Changes from V1:
>>> (1) Enable config interrupt when only one vector is allocated(Michael)
>>> (2) Clean vectors/IRQs if failed to request config interrupt
>>> since config interrupt is a must(Michael)
>>> (3) Keep local vdpa_ops, disable irq_bypass by setting IRQ = -EINVAL
>>> for shared IRQ case(Michael)
>>> (4) Improvements on error messages(Michael)
>>> (5) Squash functions implementation patches to the callers(Michael)
>>>
>>> Zhu Lingshan (4):
>>> vDPA/ifcvf: implement IO read/write helpers in the header file
>>> vDPA/ifcvf: implement device MSIX vector allocator
>>> vhost_vdpa: don't setup irq offloading when irq_num < 0
>>> vDPA/ifcvf: implement shared IRQ feature
>>>
>>> drivers/vdpa/ifcvf/ifcvf_base.c | 67 +++-----
>>> drivers/vdpa/ifcvf/ifcvf_base.h | 60 +++++++-
>>> drivers/vdpa/ifcvf/ifcvf_main.c | 260
>>> ++++++++++++++++++++++++++++----
>>> drivers/vhost/vdpa.c | 4 +
>>> 4 files changed, 312 insertions(+), 79 deletions(-)
>>>
>>> --
>>> 2.27.0
>
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2022-01-27 8:27 UTC | newest]
Thread overview: 8+ messages
2022-01-26 12:49 [PATCH V3 0/4] vDPA/ifcvf: implement shared IRQ feature Zhu Lingshan
2022-01-26 12:49 ` [PATCH V3 1/4] vDPA/ifcvf: implement IO read/write helpers in the header file Zhu Lingshan
2022-01-26 12:49 ` [PATCH V3 2/4] vDPA/ifcvf: implement device MSIX vector allocator Zhu Lingshan
2022-01-26 12:49 ` [PATCH V3 3/4] vhost_vdpa: don't setup irq offloading when irq_num < 0 Zhu Lingshan
2022-01-26 12:49 ` [PATCH V3 4/4] vDPA/ifcvf: implement shared IRQ feature Zhu Lingshan
2022-01-26 14:14 ` [PATCH V3 0/4] " Michael S. Tsirkin
2022-01-27 4:31 ` Zhu, Lingshan
2022-01-27 8:27 ` Zhu, Lingshan