public inbox for dmaengine@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/3] refactor driver to only enable irq for wq enable
@ 2021-12-13 18:51 Dave Jiang
  2021-12-13 18:51 ` [PATCH 1/3] dmaengine: idxd: embed irq_entry in idxd_wq struct Dave Jiang
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Dave Jiang @ 2021-12-13 18:51 UTC (permalink / raw)
  To: vkoul, dmaengine

This patch series refactors the code to enable the irq on wq enable only
when necessary, instead of allocating all MSIX vectors at probe. This saves
CPU irq vector allocation on x86 platforms.

MSIX vector 0 is special and remains allocated at probe.
---

Dave Jiang (3):
      dmaengine: idxd: embed irq_entry in idxd_wq struct
      dmaengine: idxd: fix descriptor flushing locking
      dmaengine: idxd: change MSIX allocation based on per wq activation


 drivers/dma/idxd/device.c | 163 +++++++++++++++++++++-------------
 drivers/dma/idxd/dma.c    |  12 +++
 drivers/dma/idxd/idxd.h   |  29 +++---
 drivers/dma/idxd/init.c   | 181 ++++++--------------------------------
 drivers/dma/idxd/irq.c    |  13 +--
 drivers/dma/idxd/submit.c |   8 +-
 drivers/dma/idxd/sysfs.c  |   1 -
 include/uapi/linux/idxd.h |   1 +
 8 files changed, 168 insertions(+), 240 deletions(-)

--


^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH 1/3] dmaengine: idxd: embed irq_entry in idxd_wq struct
  2021-12-13 18:51 [PATCH 0/3] refactor driver to only enable irq for wq enable Dave Jiang
@ 2021-12-13 18:51 ` Dave Jiang
  2021-12-13 18:51 ` [PATCH 2/3] dmaengine: idxd: fix descriptor flushing locking Dave Jiang
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Dave Jiang @ 2021-12-13 18:51 UTC (permalink / raw)
  To: vkoul, dmaengine

With irq_entry already being associated with the wq in a 1:1 relationship,
embed the irq_entry in the idxd_wq struct and remove the back pointers for
idxd_wq and idxd_device. In the process of this work, clean up the interrupt
handle assignment so that there's no decision to be made during the submit
call about where the interrupt handle value comes from. Set the interrupt
handle at irq request initialization time.

irq_entry 0 is designated as special and is tied to the device itself.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
 drivers/dma/idxd/device.c |   18 +++----
 drivers/dma/idxd/idxd.h   |   22 ++++++--
 drivers/dma/idxd/init.c   |  119 ++++++++++++++++++---------------------------
 drivers/dma/idxd/irq.c    |   10 ++--
 drivers/dma/idxd/submit.c |    8 +--
 drivers/dma/idxd/sysfs.c  |    1 
 6 files changed, 79 insertions(+), 99 deletions(-)

diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 5a50ee6f6881..8233a29f859d 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -21,8 +21,11 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
 /* Interrupt control bits */
 void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
 {
-	struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
+	struct idxd_irq_entry *ie;
+	struct irq_data *data;
 
+	ie = idxd_get_ie(idxd, vec_id);
+	data = irq_get_irq_data(ie->vector);
 	pci_msi_mask_irq(data);
 }
 
@@ -38,8 +41,11 @@ void idxd_mask_msix_vectors(struct idxd_device *idxd)
 
 void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
 {
-	struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
+	struct idxd_irq_entry *ie;
+	struct irq_data *data;
 
+	ie = idxd_get_ie(idxd, vec_id);
+	data = irq_get_irq_data(ie->vector);
 	pci_msi_unmask_irq(data);
 }
 
@@ -1216,13 +1222,6 @@ int __drv_enable_wq(struct idxd_wq *wq)
 		goto err;
 	}
 
-	/*
-	 * Device has 1 misc interrupt and N interrupts for descriptor completion. To
-	 * assign WQ to interrupt, we will take the N+1 interrupt since vector 0 is
-	 * for the misc interrupt.
-	 */
-	wq->ie = &idxd->irq_entries[wq->id + 1];
-
 	rc = idxd_wq_enable(wq);
 	if (rc < 0) {
 		dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
@@ -1273,7 +1272,6 @@ void __drv_disable_wq(struct idxd_wq *wq)
 	idxd_wq_drain(wq);
 	idxd_wq_reset(wq);
 
-	wq->ie = NULL;
 	wq->client_count = 0;
 }
 
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 6b9bfdc557fe..d77be03dd8b0 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -70,7 +70,6 @@ extern struct idxd_device_driver idxd_user_drv;
 
 #define INVALID_INT_HANDLE	-1
 struct idxd_irq_entry {
-	struct idxd_device *idxd;
 	int id;
 	int vector;
 	struct llist_head pending_llist;
@@ -81,7 +80,6 @@ struct idxd_irq_entry {
 	 */
 	spinlock_t list_lock;
 	int int_handle;
-	struct idxd_wq *wq;
 	ioasid_t pasid;
 };
 
@@ -185,7 +183,7 @@ struct idxd_wq {
 	struct wait_queue_head err_queue;
 	struct idxd_device *idxd;
 	int id;
-	struct idxd_irq_entry *ie;
+	struct idxd_irq_entry ie;
 	enum idxd_wq_type type;
 	struct idxd_group *group;
 	int client_count;
@@ -266,6 +264,7 @@ struct idxd_device {
 	int id;
 	int major;
 	u32 cmd_status;
+	struct idxd_irq_entry ie;	/* misc irq, msix 0 */
 
 	struct pci_dev *pdev;
 	void __iomem *reg_base;
@@ -302,8 +301,6 @@ struct idxd_device {
 
 	union sw_err_reg sw_err;
 	wait_queue_head_t cmd_waitq;
-	int num_wq_irqs;
-	struct idxd_irq_entry *irq_entries;
 
 	struct idxd_dma_dev *idxd_dma;
 	struct workqueue_struct *wq;
@@ -395,6 +392,21 @@ static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
 	idev->type = type;
 }
 
+static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx)
+{
+	return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie;
+}
+
+static inline struct idxd_wq *ie_to_wq(struct idxd_irq_entry *ie)
+{
+	return container_of(ie, struct idxd_wq, ie);
+}
+
+static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie)
+{
+	return container_of(ie, struct idxd_device, ie);
+}
+
 extern struct bus_type dsa_bus_type;
 
 extern bool support_enqcmd;
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 8b3afce9ea67..29c732a94027 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -72,7 +72,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
 {
 	struct pci_dev *pdev = idxd->pdev;
 	struct device *dev = &pdev->dev;
-	struct idxd_irq_entry *irq_entry;
+	struct idxd_irq_entry *ie;
 	int i, msixcnt;
 	int rc = 0;
 
@@ -90,72 +90,54 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
 	}
 	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
 
-	/*
-	 * We implement 1 completion list per MSI-X entry except for
-	 * entry 0, which is for errors and others.
-	 */
-	idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
-					 GFP_KERNEL, dev_to_node(dev));
-	if (!idxd->irq_entries) {
-		rc = -ENOMEM;
-		goto err_irq_entries;
-	}
-
-	for (i = 0; i < msixcnt; i++) {
-		idxd->irq_entries[i].id = i;
-		idxd->irq_entries[i].idxd = idxd;
-		/*
-		 * Association of WQ should be assigned starting with irq_entry 1.
-		 * irq_entry 0 is for misc interrupts and has no wq association
-		 */
-		if (i > 0)
-			idxd->irq_entries[i].wq = idxd->wqs[i - 1];
-		idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
-		idxd->irq_entries[i].int_handle = INVALID_INT_HANDLE;
-		if (device_pasid_enabled(idxd) && i > 0)
-			idxd->irq_entries[i].pasid = idxd->pasid;
-		else
-			idxd->irq_entries[i].pasid = INVALID_IOASID;
-		spin_lock_init(&idxd->irq_entries[i].list_lock);
-	}
-
 	idxd_msix_perm_setup(idxd);
 
-	irq_entry = &idxd->irq_entries[0];
-	rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
-				  0, "idxd-misc", irq_entry);
+	ie = idxd_get_ie(idxd, 0);
+	ie->vector = pci_irq_vector(pdev, 0);
+	rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
 	if (rc < 0) {
 		dev_err(dev, "Failed to allocate misc interrupt.\n");
 		goto err_misc_irq;
 	}
 
-	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);
+	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", ie->vector);
 
-	/* first MSI-X entry is not for wq interrupts */
-	idxd->num_wq_irqs = msixcnt - 1;
+	for (i = 0; i < idxd->max_wqs; i++) {
+		int msix_idx = i + 1;
+
+		ie = idxd_get_ie(idxd, msix_idx);
 
-	for (i = 1; i < msixcnt; i++) {
-		irq_entry = &idxd->irq_entries[i];
+		/* MSIX vector 0 special, wq irq entry starts at 1 */
+		ie->id = msix_idx;
+		ie->vector = pci_irq_vector(pdev, msix_idx);
+		ie->int_handle = INVALID_INT_HANDLE;
+		if (device_pasid_enabled(idxd) && i > 0)
+			ie->pasid = idxd->pasid;
+		else
+			ie->pasid = INVALID_IOASID;
+		spin_lock_init(&ie->list_lock);
+		init_llist_head(&ie->pending_llist);
+		INIT_LIST_HEAD(&ie->work_list);
 
-		init_llist_head(&idxd->irq_entries[i].pending_llist);
-		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
-		rc = request_threaded_irq(irq_entry->vector, NULL,
-					  idxd_wq_thread, 0, "idxd-portal", irq_entry);
+		rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
 		if (rc < 0) {
-			dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
+			dev_err(dev, "Failed to allocate irq %d.\n", ie->vector);
 			goto err_wq_irqs;
 		}
 
-		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
+		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, ie->vector);
 		if (idxd->request_int_handles) {
-			rc = idxd_device_request_int_handle(idxd, i, &irq_entry->int_handle,
+			rc = idxd_device_request_int_handle(idxd, i, &ie->int_handle,
 							    IDXD_IRQ_MSIX);
 			if (rc < 0) {
-				free_irq(irq_entry->vector, irq_entry);
+				free_irq(ie->vector, ie);
 				goto err_wq_irqs;
 			}
-			dev_dbg(dev, "int handle requested: %u\n", irq_entry->int_handle);
+			dev_dbg(dev, "int handle requested: %u\n", ie->int_handle);
+		} else {
+			ie->int_handle = msix_idx;
 		}
+
 	}
 
 	idxd_unmask_error_interrupts(idxd);
@@ -163,23 +145,19 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
 
  err_wq_irqs:
 	while (--i >= 0) {
-		irq_entry = &idxd->irq_entries[i];
-		free_irq(irq_entry->vector, irq_entry);
-		if (irq_entry->int_handle != INVALID_INT_HANDLE) {
-			idxd_device_release_int_handle(idxd, irq_entry->int_handle,
-						       IDXD_IRQ_MSIX);
-			irq_entry->int_handle = INVALID_INT_HANDLE;
-			irq_entry->pasid = INVALID_IOASID;
+		ie = &idxd->wqs[i]->ie;
+		free_irq(ie->vector, ie);
+		if (ie->int_handle != INVALID_INT_HANDLE) {
+			idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
+			ie->int_handle = INVALID_INT_HANDLE;
+			ie->pasid = INVALID_IOASID;
 		}
-		irq_entry->vector = -1;
-		irq_entry->wq = NULL;
-		irq_entry->idxd = NULL;
+		ie->vector = -1;
 	}
  err_misc_irq:
 	/* Disable error interrupt generation */
 	idxd_mask_error_interrupts(idxd);
 	idxd_msix_perm_clear(idxd);
- err_irq_entries:
 	pci_free_irq_vectors(pdev);
 	dev_err(dev, "No usable interrupts\n");
 	return rc;
@@ -188,21 +166,18 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
 static void idxd_cleanup_interrupts(struct idxd_device *idxd)
 {
 	struct pci_dev *pdev = idxd->pdev;
-	struct idxd_irq_entry *irq_entry;
+	struct idxd_irq_entry *ie;
 	int i;
 
 	for (i = 0; i < idxd->irq_cnt; i++) {
-		irq_entry = &idxd->irq_entries[i];
-		if (irq_entry->int_handle != INVALID_INT_HANDLE) {
-			idxd_device_release_int_handle(idxd, irq_entry->int_handle,
-						       IDXD_IRQ_MSIX);
-			irq_entry->int_handle = INVALID_INT_HANDLE;
-			irq_entry->pasid = INVALID_IOASID;
+		ie = idxd_get_ie(idxd, i);
+		if (ie->int_handle != INVALID_INT_HANDLE) {
+			idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
+			ie->int_handle = INVALID_INT_HANDLE;
+			ie->pasid = INVALID_IOASID;
 		}
-		irq_entry->vector = -1;
-		irq_entry->wq = NULL;
-		irq_entry->idxd = NULL;
-		free_irq(irq_entry->vector, irq_entry);
+		free_irq(ie->vector, ie);
+		ie->vector = -1;
 	}
 
 	idxd_mask_error_interrupts(idxd);
@@ -755,7 +730,7 @@ static void idxd_release_int_handles(struct idxd_device *idxd)
 	int i, rc;
 
 	for (i = 1; i < idxd->irq_cnt; i++) {
-		struct idxd_irq_entry *ie = &idxd->irq_entries[i];
+		struct idxd_irq_entry *ie = idxd_get_ie(idxd, i);
 
 		if (ie->int_handle != INVALID_INT_HANDLE) {
 			rc = idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
@@ -783,7 +758,7 @@ static void idxd_shutdown(struct pci_dev *pdev)
 	idxd_mask_error_interrupts(idxd);
 
 	for (i = 0; i < msixcnt; i++) {
-		irq_entry = &idxd->irq_entries[i];
+		irq_entry = idxd_get_ie(idxd, i);
 		synchronize_irq(irq_entry->vector);
 		if (i == 0)
 			continue;
@@ -815,7 +790,7 @@ static void idxd_remove(struct pci_dev *pdev)
 		idxd_disable_system_pasid(idxd);
 
 	for (i = 0; i < msixcnt; i++) {
-		irq_entry = &idxd->irq_entries[i];
+		irq_entry = idxd_get_ie(idxd, i);
 		free_irq(irq_entry->vector, irq_entry);
 	}
 	idxd_msix_perm_clear(idxd);
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 0b0055a0ad2a..a50f1bec9a4e 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -73,8 +73,8 @@ static void idxd_device_reinit(struct work_struct *work)
  */
 static void idxd_int_handle_revoke_drain(struct idxd_irq_entry *ie)
 {
-	struct idxd_wq *wq = ie->wq;
-	struct idxd_device *idxd = ie->idxd;
+	struct idxd_wq *wq = ie_to_wq(ie);
+	struct idxd_device *idxd = wq->idxd;
 	struct device *dev = &idxd->pdev->dev;
 	struct dsa_hw_desc desc = {};
 	void __iomem *portal;
@@ -155,8 +155,8 @@ static void idxd_int_handle_revoke(struct work_struct *work)
 	 * at the end to make sure all invalid int handle descriptors are processed.
 	 */
 	for (i = 1; i < idxd->irq_cnt; i++) {
-		struct idxd_irq_entry *ie = &idxd->irq_entries[i];
-		struct idxd_wq *wq = ie->wq;
+		struct idxd_irq_entry *ie = idxd_get_ie(idxd, i);
+		struct idxd_wq *wq = ie_to_wq(ie);
 
 		rc = idxd_device_request_int_handle(idxd, i, &new_handle, IDXD_IRQ_MSIX);
 		if (rc < 0) {
@@ -338,7 +338,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 irqreturn_t idxd_misc_thread(int vec, void *data)
 {
 	struct idxd_irq_entry *irq_entry = data;
-	struct idxd_device *idxd = irq_entry->idxd;
+	struct idxd_device *idxd = ie_to_idxd(irq_entry);
 	int rc;
 	u32 cause;
 
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 569815a84e95..d9232bfb139f 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -177,12 +177,8 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
 	 * that we designated the descriptor to.
 	 */
 	if (desc_flags & IDXD_OP_FLAG_RCI) {
-		ie = wq->ie;
-		if (ie->int_handle == INVALID_INT_HANDLE)
-			desc->hw->int_handle = ie->id;
-		else
-			desc->hw->int_handle = ie->int_handle;
-
+		ie = &wq->ie;
+		desc->hw->int_handle = ie->int_handle;
 		llist_add(&desc->llnode, &ie->pending_llist);
 	}
 
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index c0fec88ff6c1..13404532131b 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1304,7 +1304,6 @@ static void idxd_conf_device_release(struct device *dev)
 	kfree(idxd->groups);
 	kfree(idxd->wqs);
 	kfree(idxd->engines);
-	kfree(idxd->irq_entries);
 	ida_free(&idxd_ida, idxd->id);
 	kfree(idxd);
 }



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH 2/3] dmaengine: idxd: fix descriptor flushing locking
  2021-12-13 18:51 [PATCH 0/3] refactor driver to only enable irq for wq enable Dave Jiang
  2021-12-13 18:51 ` [PATCH 1/3] dmaengine: idxd: embed irq_entry in idxd_wq struct Dave Jiang
@ 2021-12-13 18:51 ` Dave Jiang
  2021-12-13 18:51 ` [PATCH 3/3] dmaengine: idxd: change MSIX allocation based on per wq activation Dave Jiang
  2022-01-05  7:42 ` [PATCH 0/3] refactor driver to only enable irq for wq enable Vinod Koul
  3 siblings, 0 replies; 5+ messages in thread
From: Dave Jiang @ 2021-12-13 18:51 UTC (permalink / raw)
  To: vkoul, dmaengine

The descriptor flushing for shutdown is not holding the irq_entry list
lock. If there's ongoing interrupt completion handling, this can corrupt
the list. Add locking to protect list walking. Also refactor the code so
it's more compact.

Fixes: 8f47d1a5e545 ("dmaengine: idxd: connect idxd to dmaengine subsystem")
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
 drivers/dma/idxd/init.c |   29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 29c732a94027..03c735727f68 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -689,26 +689,28 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	return rc;
 }
 
-static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
+static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
 {
 	struct idxd_desc *desc, *itr;
 	struct llist_node *head;
+	LIST_HEAD(flist);
+	enum idxd_complete_type ctype;
 
+	spin_lock(&ie->list_lock);
 	head = llist_del_all(&ie->pending_llist);
-	if (!head)
-		return;
-
-	llist_for_each_entry_safe(desc, itr, head, llnode)
-		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
-}
+	if (head) {
+		llist_for_each_entry_safe(desc, itr, head, llnode)
+			list_add_tail(&desc->list, &ie->work_list);
+	}
 
-static void idxd_flush_work_list(struct idxd_irq_entry *ie)
-{
-	struct idxd_desc *desc, *iter;
+	list_for_each_entry_safe(desc, itr, &ie->work_list, list)
+		list_move_tail(&desc->list, &flist);
+	spin_unlock(&ie->list_lock);
 
-	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
+	list_for_each_entry_safe(desc, itr, &flist, list) {
 		list_del(&desc->list);
-		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
+		ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
+		idxd_dma_complete_txd(desc, ctype, true);
 	}
 }
 
@@ -762,8 +764,7 @@ static void idxd_shutdown(struct pci_dev *pdev)
 		synchronize_irq(irq_entry->vector);
 		if (i == 0)
 			continue;
-		idxd_flush_pending_llist(irq_entry);
-		idxd_flush_work_list(irq_entry);
+		idxd_flush_pending_descs(irq_entry);
 	}
 	flush_workqueue(idxd->wq);
 }



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH 3/3] dmaengine: idxd: change MSIX allocation based on per wq activation
  2021-12-13 18:51 [PATCH 0/3] refactor driver to only enable irq for wq enable Dave Jiang
  2021-12-13 18:51 ` [PATCH 1/3] dmaengine: idxd: embed irq_entry in idxd_wq struct Dave Jiang
  2021-12-13 18:51 ` [PATCH 2/3] dmaengine: idxd: fix descriptor flushing locking Dave Jiang
@ 2021-12-13 18:51 ` Dave Jiang
  2022-01-05  7:42 ` [PATCH 0/3] refactor driver to only enable irq for wq enable Vinod Koul
  3 siblings, 0 replies; 5+ messages in thread
From: Dave Jiang @ 2021-12-13 18:51 UTC (permalink / raw)
  To: vkoul, dmaengine

Change the driver so that the WQ interrupt is requested only when the wq is
being enabled. This new scheme sets things up so that request_threaded_irq()
is only called when a kernel wq type is being enabled. This also prepares for
future interrupt requests where different interrupt handlers, such as a wq
occupancy interrupt, can be set up instead of the wq completion interrupt.

Not calling request_irq() until the WQ actually needs an irq also prevents
wasting of CPU irq vectors on x86 systems, which is a limited resource.

idxd_flush_pending_descs() is moved to device.c since descriptor flushing
is now part of wq disable rather than shutdown().

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
 drivers/dma/idxd/device.c |  161 ++++++++++++++++++++++++++++-----------------
 drivers/dma/idxd/dma.c    |   12 +++
 drivers/dma/idxd/idxd.h   |    7 +-
 drivers/dma/idxd/init.c   |  133 ++++---------------------------------
 drivers/dma/idxd/irq.c    |    3 +
 include/uapi/linux/idxd.h |    1 
 6 files changed, 132 insertions(+), 185 deletions(-)

diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 8233a29f859d..280b41417f41 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -19,36 +19,6 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
 static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
 
 /* Interrupt control bits */
-void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
-{
-	struct idxd_irq_entry *ie;
-	struct irq_data *data;
-
-	ie = idxd_get_ie(idxd, vec_id);
-	data = irq_get_irq_data(ie->vector);
-	pci_msi_mask_irq(data);
-}
-
-void idxd_mask_msix_vectors(struct idxd_device *idxd)
-{
-	struct pci_dev *pdev = idxd->pdev;
-	int msixcnt = pci_msix_vec_count(pdev);
-	int i;
-
-	for (i = 0; i < msixcnt; i++)
-		idxd_mask_msix_vector(idxd, i);
-}
-
-void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
-{
-	struct idxd_irq_entry *ie;
-	struct irq_data *data;
-
-	ie = idxd_get_ie(idxd, vec_id);
-	data = irq_get_irq_data(ie->vector);
-	pci_msi_unmask_irq(data);
-}
-
 void idxd_unmask_error_interrupts(struct idxd_device *idxd)
 {
 	union genctrl_reg genctrl;
@@ -593,7 +563,6 @@ void idxd_device_reset(struct idxd_device *idxd)
 	idxd_device_clear_state(idxd);
 	idxd->state = IDXD_DEV_DISABLED;
 	idxd_unmask_error_interrupts(idxd);
-	idxd_msix_perm_setup(idxd);
 	spin_unlock(&idxd->dev_lock);
 }
 
@@ -732,36 +701,6 @@ void idxd_device_clear_state(struct idxd_device *idxd)
 	idxd_device_wqs_clear_state(idxd);
 }
 
-void idxd_msix_perm_setup(struct idxd_device *idxd)
-{
-	union msix_perm mperm;
-	int i, msixcnt;
-
-	msixcnt = pci_msix_vec_count(idxd->pdev);
-	if (msixcnt < 0)
-		return;
-
-	mperm.bits = 0;
-	mperm.pasid = idxd->pasid;
-	mperm.pasid_en = device_pasid_enabled(idxd);
-	for (i = 1; i < msixcnt; i++)
-		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
-}
-
-void idxd_msix_perm_clear(struct idxd_device *idxd)
-{
-	union msix_perm mperm;
-	int i, msixcnt;
-
-	msixcnt = pci_msix_vec_count(idxd->pdev);
-	if (msixcnt < 0)
-		return;
-
-	mperm.bits = 0;
-	for (i = 1; i < msixcnt; i++)
-		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
-}
-
 static void idxd_group_config_write(struct idxd_group *group)
 {
 	struct idxd_device *idxd = group->idxd;
@@ -1158,6 +1097,106 @@ int idxd_device_load_config(struct idxd_device *idxd)
 	return 0;
 }
 
+static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
+{
+	struct idxd_desc *desc, *itr;
+	struct llist_node *head;
+	LIST_HEAD(flist);
+	enum idxd_complete_type ctype;
+
+	spin_lock(&ie->list_lock);
+	head = llist_del_all(&ie->pending_llist);
+	if (head) {
+		llist_for_each_entry_safe(desc, itr, head, llnode)
+			list_add_tail(&desc->list, &ie->work_list);
+	}
+
+	list_for_each_entry_safe(desc, itr, &ie->work_list, list)
+		list_move_tail(&desc->list, &flist);
+	spin_unlock(&ie->list_lock);
+
+	list_for_each_entry_safe(desc, itr, &flist, list) {
+		list_del(&desc->list);
+		ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
+		idxd_dma_complete_txd(desc, ctype, true);
+	}
+}
+
+static void idxd_device_set_perm_entry(struct idxd_device *idxd,
+				       struct idxd_irq_entry *ie)
+{
+	union msix_perm mperm;
+
+	if (ie->pasid == INVALID_IOASID)
+		return;
+
+	mperm.bits = 0;
+	mperm.pasid = ie->pasid;
+	mperm.pasid_en = 1;
+	iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
+}
+
+static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
+					 struct idxd_irq_entry *ie)
+{
+	iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
+}
+
+void idxd_wq_free_irq(struct idxd_wq *wq)
+{
+	struct idxd_device *idxd = wq->idxd;
+	struct idxd_irq_entry *ie = &wq->ie;
+
+	synchronize_irq(ie->vector);
+	free_irq(ie->vector, ie);
+	idxd_flush_pending_descs(ie);
+	if (idxd->request_int_handles)
+		idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
+	idxd_device_clear_perm_entry(idxd, ie);
+	ie->vector = -1;
+	ie->int_handle = INVALID_INT_HANDLE;
+	ie->pasid = INVALID_IOASID;
+}
+
+int idxd_wq_request_irq(struct idxd_wq *wq)
+{
+	struct idxd_device *idxd = wq->idxd;
+	struct pci_dev *pdev = idxd->pdev;
+	struct device *dev = &pdev->dev;
+	struct idxd_irq_entry *ie;
+	int rc;
+
+	ie = &wq->ie;
+	ie->vector = pci_irq_vector(pdev, ie->id);
+	ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;
+	idxd_device_set_perm_entry(idxd, ie);
+
+	rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
+	if (rc < 0) {
+		dev_err(dev, "Failed to request irq %d.\n", ie->vector);
+		goto err_irq;
+	}
+
+	if (idxd->request_int_handles) {
+		rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
+						    IDXD_IRQ_MSIX);
+		if (rc < 0)
+			goto err_int_handle;
+	} else {
+		ie->int_handle = ie->id;
+	}
+
+	return 0;
+
+err_int_handle:
+	ie->int_handle = INVALID_INT_HANDLE;
+	free_irq(ie->vector, ie);
+err_irq:
+	idxd_device_clear_perm_entry(idxd, ie);
+	ie->pasid = INVALID_IOASID;
+	return rc;
+}
+
 int __drv_enable_wq(struct idxd_wq *wq)
 {
 	struct idxd_device *idxd = wq->idxd;
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index 2ce873994e33..bfff59617d04 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -289,6 +289,14 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
 
 	mutex_lock(&wq->wq_lock);
 	wq->type = IDXD_WQT_KERNEL;
+
+	rc = idxd_wq_request_irq(wq);
+	if (rc < 0) {
+		idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
+		dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
+		goto err_irq;
+	}
+
 	rc = __drv_enable_wq(wq);
 	if (rc < 0) {
 		dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
@@ -329,6 +337,8 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
 err_res_alloc:
 	__drv_disable_wq(wq);
 err:
+	idxd_wq_free_irq(wq);
+err_irq:
 	wq->type = IDXD_WQT_NONE;
 	mutex_unlock(&wq->wq_lock);
 	return rc;
@@ -344,6 +354,8 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
 	idxd_wq_free_resources(wq);
 	__drv_disable_wq(wq);
 	percpu_ref_exit(&wq->wq_active);
+	idxd_wq_free_irq(wq);
+	wq->type = IDXD_WQT_NONE;
 	mutex_unlock(&wq->wq_lock);
 }
 
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index d77be03dd8b0..6353e762286d 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -548,15 +548,10 @@ void idxd_wqs_quiesce(struct idxd_device *idxd);
 bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
 
 /* device interrupt control */
-void idxd_msix_perm_setup(struct idxd_device *idxd);
-void idxd_msix_perm_clear(struct idxd_device *idxd);
 irqreturn_t idxd_misc_thread(int vec, void *data);
 irqreturn_t idxd_wq_thread(int irq, void *data);
 void idxd_mask_error_interrupts(struct idxd_device *idxd);
 void idxd_unmask_error_interrupts(struct idxd_device *idxd);
-void idxd_mask_msix_vectors(struct idxd_device *idxd);
-void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
-void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
 
 /* device control */
 int idxd_register_idxd_drv(void);
@@ -595,6 +590,8 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq);
 void __idxd_wq_quiesce(struct idxd_wq *wq);
 void idxd_wq_quiesce(struct idxd_wq *wq);
 int idxd_wq_init_percpu_ref(struct idxd_wq *wq);
+void idxd_wq_free_irq(struct idxd_wq *wq);
+int idxd_wq_request_irq(struct idxd_wq *wq);
 
 /* submission */
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 03c735727f68..3505efb7ae71 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -90,7 +90,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
 	}
 	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
 
-	idxd_msix_perm_setup(idxd);
 
 	ie = idxd_get_ie(idxd, 0);
 	ie->vector = pci_irq_vector(pdev, 0);
@@ -99,65 +98,26 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
 		dev_err(dev, "Failed to allocate misc interrupt.\n");
 		goto err_misc_irq;
 	}
-
-	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", ie->vector);
+	dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);
 
 	for (i = 0; i < idxd->max_wqs; i++) {
 		int msix_idx = i + 1;
 
 		ie = idxd_get_ie(idxd, msix_idx);
-
-		/* MSIX vector 0 special, wq irq entry starts at 1 */
 		ie->id = msix_idx;
-		ie->vector = pci_irq_vector(pdev, msix_idx);
 		ie->int_handle = INVALID_INT_HANDLE;
-		if (device_pasid_enabled(idxd) && i > 0)
-			ie->pasid = idxd->pasid;
-		else
-			ie->pasid = INVALID_IOASID;
+		ie->pasid = INVALID_IOASID;
+
 		spin_lock_init(&ie->list_lock);
 		init_llist_head(&ie->pending_llist);
 		INIT_LIST_HEAD(&ie->work_list);
-
-		rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
-		if (rc < 0) {
-			dev_err(dev, "Failed to allocate irq %d.\n", ie->vector);
-			goto err_wq_irqs;
-		}
-
-		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, ie->vector);
-		if (idxd->request_int_handles) {
-			rc = idxd_device_request_int_handle(idxd, i, &ie->int_handle,
-							    IDXD_IRQ_MSIX);
-			if (rc < 0) {
-				free_irq(ie->vector, ie);
-				goto err_wq_irqs;
-			}
-			dev_dbg(dev, "int handle requested: %u\n", ie->int_handle);
-		} else {
-			ie->int_handle = msix_idx;
-		}
-
 	}
 
 	idxd_unmask_error_interrupts(idxd);
 	return 0;
 
- err_wq_irqs:
-	while (--i >= 0) {
-		ie = &idxd->wqs[i]->ie;
-		free_irq(ie->vector, ie);
-		if (ie->int_handle != INVALID_INT_HANDLE) {
-			idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
-			ie->int_handle = INVALID_INT_HANDLE;
-			ie->pasid = INVALID_IOASID;
-		}
-		ie->vector = -1;
-	}
  err_misc_irq:
-	/* Disable error interrupt generation */
 	idxd_mask_error_interrupts(idxd);
-	idxd_msix_perm_clear(idxd);
 	pci_free_irq_vectors(pdev);
 	dev_err(dev, "No usable interrupts\n");
 	return rc;
@@ -167,20 +127,15 @@ static void idxd_cleanup_interrupts(struct idxd_device *idxd)
 {
 	struct pci_dev *pdev = idxd->pdev;
 	struct idxd_irq_entry *ie;
-	int i;
+	int msixcnt;
 
-	for (i = 0; i < idxd->irq_cnt; i++) {
-		ie = idxd_get_ie(idxd, i);
-		if (ie->int_handle != INVALID_INT_HANDLE) {
-			idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
-			ie->int_handle = INVALID_INT_HANDLE;
-			ie->pasid = INVALID_IOASID;
-		}
-		free_irq(ie->vector, ie);
-		ie->vector = -1;
-	}
+	msixcnt = pci_msix_vec_count(pdev);
+	if (msixcnt <= 0)
+		return;
 
+	ie = idxd_get_ie(idxd, 0);
 	idxd_mask_error_interrupts(idxd);
+	free_irq(ie->vector, ie);
 	pci_free_irq_vectors(pdev);
 }
 
@@ -592,8 +547,6 @@ static int idxd_probe(struct idxd_device *idxd)
 	if (rc)
 		goto err_config;
 
-	dev_dbg(dev, "IDXD interrupt setup complete.\n");
-
 	idxd->major = idxd_cdev_get_major(idxd);
 
 	rc = perfmon_pmu_init(idxd);
@@ -689,31 +642,6 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	return rc;
 }
 
-static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
-{
-	struct idxd_desc *desc, *itr;
-	struct llist_node *head;
-	LIST_HEAD(flist);
-	enum idxd_complete_type ctype;
-
-	spin_lock(&ie->list_lock);
-	head = llist_del_all(&ie->pending_llist);
-	if (head) {
-		llist_for_each_entry_safe(desc, itr, head, llnode)
-			list_add_tail(&desc->list, &ie->work_list);
-	}
-
-	list_for_each_entry_safe(desc, itr, &ie->work_list, list)
-		list_move_tail(&desc->list, &flist);
-	spin_unlock(&ie->list_lock);
-
-	list_for_each_entry_safe(desc, itr, &flist, list) {
-		list_del(&desc->list);
-		ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
-		idxd_dma_complete_txd(desc, ctype, true);
-	}
-}
-
 void idxd_wqs_quiesce(struct idxd_device *idxd)
 {
 	struct idxd_wq *wq;
@@ -726,46 +654,19 @@ void idxd_wqs_quiesce(struct idxd_device *idxd)
 	}
 }
 
-static void idxd_release_int_handles(struct idxd_device *idxd)
-{
-	struct device *dev = &idxd->pdev->dev;
-	int i, rc;
-
-	for (i = 1; i < idxd->irq_cnt; i++) {
-		struct idxd_irq_entry *ie = idxd_get_ie(idxd, i);
-
-		if (ie->int_handle != INVALID_INT_HANDLE) {
-			rc = idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
-			if (rc < 0)
-				dev_warn(dev, "irq handle %d release failed\n", ie->int_handle);
-			else
-				dev_dbg(dev, "int handle released: %u\n", ie->int_handle);
-		}
-	}
-}
-
 static void idxd_shutdown(struct pci_dev *pdev)
 {
 	struct idxd_device *idxd = pci_get_drvdata(pdev);
-	int rc, i;
 	struct idxd_irq_entry *irq_entry;
-	int msixcnt = pci_msix_vec_count(pdev);
+	int rc;
 
 	rc = idxd_device_disable(idxd);
 	if (rc)
 		dev_err(&pdev->dev, "Disabling device failed\n");
 
-	dev_dbg(&pdev->dev, "%s called\n", __func__);
-	idxd_mask_msix_vectors(idxd);
+	irq_entry = &idxd->ie;
+	synchronize_irq(irq_entry->vector);
 	idxd_mask_error_interrupts(idxd);
-
-	for (i = 0; i < msixcnt; i++) {
-		irq_entry = idxd_get_ie(idxd, i);
-		synchronize_irq(irq_entry->vector);
-		if (i == 0)
-			continue;
-		idxd_flush_pending_descs(irq_entry);
-	}
 	flush_workqueue(idxd->wq);
 }
 
@@ -773,8 +674,6 @@ static void idxd_remove(struct pci_dev *pdev)
 {
 	struct idxd_device *idxd = pci_get_drvdata(pdev);
 	struct idxd_irq_entry *irq_entry;
-	int msixcnt = pci_msix_vec_count(pdev);
-	int i;
 
 	idxd_unregister_devices(idxd);
 	/*
@@ -790,12 +689,8 @@ static void idxd_remove(struct pci_dev *pdev)
 	if (device_pasid_enabled(idxd))
 		idxd_disable_system_pasid(idxd);
 
-	for (i = 0; i < msixcnt; i++) {
-		irq_entry = idxd_get_ie(idxd, i);
-		free_irq(irq_entry->vector, irq_entry);
-	}
-	idxd_msix_perm_clear(idxd);
-	idxd_release_int_handles(idxd);
+	irq_entry = idxd_get_ie(idxd, 0);
+	free_irq(irq_entry->vector, irq_entry);
 	pci_free_irq_vectors(pdev);
 	pci_iounmap(pdev, idxd->reg_base);
 	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index a50f1bec9a4e..f94482bfeb6b 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -158,6 +158,9 @@ static void idxd_int_handle_revoke(struct work_struct *work)
 		struct idxd_irq_entry *ie = idxd_get_ie(idxd, i);
 		struct idxd_wq *wq = ie_to_wq(ie);
 
+		if (ie->int_handle == INVALID_INT_HANDLE)
+			continue;
+
 		rc = idxd_device_request_int_handle(idxd, i, &new_handle, IDXD_IRQ_MSIX);
 		if (rc < 0) {
 			dev_warn(dev, "get int handle %d failed: %d\n", i, rc);
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
index c750eac09fc9..a8f0ff75c430 100644
--- a/include/uapi/linux/idxd.h
+++ b/include/uapi/linux/idxd.h
@@ -28,6 +28,7 @@ enum idxd_scmd_stat {
 	IDXD_SCMD_WQ_NONE_CONFIGURED = 0x800d0000,
 	IDXD_SCMD_WQ_NO_SIZE = 0x800e0000,
 	IDXD_SCMD_WQ_NO_PRIV = 0x800f0000,
+	IDXD_SCMD_WQ_IRQ_ERR = 0x80100000,
 };
 
 #define IDXD_SCMD_SOFTERR_MASK	0x80000000



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH 0/3] refactor driver to only enable irq for wq enable
  2021-12-13 18:51 [PATCH 0/3] refactor driver to only enable irq for wq enable Dave Jiang
                   ` (2 preceding siblings ...)
  2021-12-13 18:51 ` [PATCH 3/3] dmaengine: idxd: change MSIX allocation based on per wq activation Dave Jiang
@ 2022-01-05  7:42 ` Vinod Koul
  3 siblings, 0 replies; 5+ messages in thread
From: Vinod Koul @ 2022-01-05  7:42 UTC (permalink / raw)
  To: Dave Jiang; +Cc: dmaengine

On 13-12-21, 11:51, Dave Jiang wrote:
> This patch series refactors the code to enable irq on wq enable only
> when necessary instead of allocating all MSIX vectors at probe. This saves
> CPU irq vector allocation on x86 platforms.

Applied, thanks

-- 
~Vinod

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2022-01-05  7:42 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-12-13 18:51 [PATCH 0/3] refactor driver to only enable irq for wq enable Dave Jiang
2021-12-13 18:51 ` [PATCH 1/3] dmaengine: idxd: embed irq_entry in idxd_wq struct Dave Jiang
2021-12-13 18:51 ` [PATCH 2/3] dmaengine: idxd: fix descriptor flushing locking Dave Jiang
2021-12-13 18:51 ` [PATCH 3/3] dmaengine: idxd: change MSIX allocation based on per wq activation Dave Jiang
2022-01-05  7:42 ` [PATCH 0/3] refactor driver to only enable irq for wq enable Vinod Koul

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox