netdev.vger.kernel.org archive mirror
* [PATCH 01/15] net: alteon: Convert tasklet API to new bottom half workqueue mechanism
From: Allen Pais @ 2024-06-21  5:05 UTC
  To: kuba, Jes Sorensen, David S. Miller, Eric Dumazet, Paolo Abeni
  Cc: kda, cai.huoqing, dougmill, npiggin, christophe.leroy,
	aneesh.kumar, naveen.n.rao, nnac123, tlfalcon, cooldavid,
	marcin.s.wojtas, mlindner, stephen, nbd, sean.wang, Mark-MC.Lee,
	lorenzo, matthias.bgg, angelogioacchino.delregno, borisp,
	bryan.whitehead, UNGLinuxDriver, louis.peens, richardcochran,
	linux-rdma, linux-kernel, linux-acenic, linux-net-drivers,
	Allen Pais, netdev

Migrate the tasklet API to the new bottom half workqueue mechanism,
replacing all occurrences of tasklet usage with the appropriate
workqueue APIs throughout the alteon driver. This transition ensures
compatibility with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 drivers/net/ethernet/alteon/acenic.c | 26 +++++++++++++-------------
 drivers/net/ethernet/alteon/acenic.h |  8 ++++----
 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 3d8ac63132fb..9e6f91df2ba0 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -1560,9 +1560,9 @@ static void ace_watchdog(struct net_device *data, unsigned int txqueue)
 }
 
 
-static void ace_tasklet(struct tasklet_struct *t)
+static void ace_bh_work(struct work_struct *work)
 {
-	struct ace_private *ap = from_tasklet(ap, t, ace_tasklet);
+	struct ace_private *ap = from_work(ap, work, ace_bh_work);
 	struct net_device *dev = ap->ndev;
 	int cur_size;
 
@@ -1595,7 +1595,7 @@ static void ace_tasklet(struct tasklet_struct *t)
 #endif
 		ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
 	}
-	ap->tasklet_pending = 0;
+	ap->bh_work_pending = 0;
 }
 
 
@@ -1617,7 +1617,7 @@ static void ace_dump_trace(struct ace_private *ap)
  *
  * Loading rings is safe without holding the spin lock since this is
  * done only before the device is enabled, thus no interrupts are
- * generated and by the interrupt handler/tasklet handler.
+ * generated and by the interrupt handler/bh handler.
  */
 static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
 {
@@ -2160,7 +2160,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
 	 */
 	if (netif_running(dev)) {
 		int cur_size;
-		int run_tasklet = 0;
+		int run_bh_work = 0;
 
 		cur_size = atomic_read(&ap->cur_rx_bufs);
 		if (cur_size < RX_LOW_STD_THRES) {
@@ -2172,7 +2172,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
 				ace_load_std_rx_ring(dev,
 						     RX_RING_SIZE - cur_size);
 			} else
-				run_tasklet = 1;
+				run_bh_work = 1;
 		}
 
 		if (!ACE_IS_TIGON_I(ap)) {
@@ -2188,7 +2188,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
 					ace_load_mini_rx_ring(dev,
 							      RX_MINI_SIZE - cur_size);
 				} else
-					run_tasklet = 1;
+					run_bh_work = 1;
 			}
 		}
 
@@ -2205,12 +2205,12 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
 					ace_load_jumbo_rx_ring(dev,
 							       RX_JUMBO_SIZE - cur_size);
 				} else
-					run_tasklet = 1;
+					run_bh_work = 1;
 			}
 		}
-		if (run_tasklet && !ap->tasklet_pending) {
-			ap->tasklet_pending = 1;
-			tasklet_schedule(&ap->ace_tasklet);
+		if (run_bh_work && !ap->bh_work_pending) {
+			ap->bh_work_pending = 1;
+			queue_work(system_bh_wq, &ap->ace_bh_work);
 		}
 	}
 
@@ -2267,7 +2267,7 @@ static int ace_open(struct net_device *dev)
 	/*
 	 * Setup the bottom half rx ring refill handler
 	 */
-	tasklet_setup(&ap->ace_tasklet, ace_tasklet);
+	INIT_WORK(&ap->ace_bh_work, ace_bh_work);
 	return 0;
 }
 
@@ -2301,7 +2301,7 @@ static int ace_close(struct net_device *dev)
 	cmd.idx = 0;
 	ace_issue_cmd(regs, &cmd);
 
-	tasklet_kill(&ap->ace_tasklet);
+	cancel_work_sync(&ap->ace_bh_work);
 
 	/*
 	 * Make sure one CPU is not processing packets while
diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h
index ca5ce0cbbad1..0e45a97b9c9b 100644
--- a/drivers/net/ethernet/alteon/acenic.h
+++ b/drivers/net/ethernet/alteon/acenic.h
@@ -2,7 +2,7 @@
 #ifndef _ACENIC_H_
 #define _ACENIC_H_
 #include <linux/interrupt.h>
-
+#include <linux/workqueue.h>
 
 /*
  * Generate TX index update each time, when TX ring is closed.
@@ -667,8 +667,8 @@ struct ace_private
 	struct rx_desc		*rx_mini_ring;
 	struct rx_desc		*rx_return_ring;
 
-	int			tasklet_pending, jumbo;
-	struct tasklet_struct	ace_tasklet;
+	int			bh_work_pending, jumbo;
+	struct work_struct	ace_bh_work;
 
 	struct event		*evt_ring;
 
@@ -776,7 +776,7 @@ static int ace_open(struct net_device *dev);
 static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
 				  struct net_device *dev);
 static int ace_close(struct net_device *dev);
-static void ace_tasklet(struct tasklet_struct *t);
+static void ace_bh_work(struct work_struct *work);
 static void ace_dump_trace(struct ace_private *ap);
 static void ace_set_multicast_list(struct net_device *dev);
 static int ace_change_mtu(struct net_device *dev, int new_mtu);
-- 
2.34.1
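
The conversion applied throughout this series is mechanical. As a
reference for readers new to the BH workqueue interface, here is a
minimal sketch of the pattern, using an invented my_dev structure
rather than code from any of the drivers touched in this series:

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    struct my_dev {
            struct work_struct bh_work;     /* was: struct tasklet_struct tl */
    };

    /* was: static void my_dev_tasklet(struct tasklet_struct *t) */
    static void my_dev_bh_work(struct work_struct *work)
    {
            /* was: from_tasklet(md, t, tl) */
            struct my_dev *md = from_work(md, work, bh_work);

            /* deferred processing; BH work items run in softirq context */
    }

    static void my_dev_init(struct my_dev *md)
    {
            /* was: tasklet_setup(&md->tl, my_dev_tasklet) */
            INIT_WORK(&md->bh_work, my_dev_bh_work);
    }

    static irqreturn_t my_dev_irq(int irq, void *data)
    {
            struct my_dev *md = data;

            /* was: tasklet_schedule(&md->tl) */
            queue_work(system_bh_wq, &md->bh_work);
            return IRQ_HANDLED;
    }

    static void my_dev_teardown(struct my_dev *md)
    {
            /* was: tasklet_kill(&md->tl) */
            cancel_work_sync(&md->bh_work);
    }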



* [PATCH 02/15] net: xgbe: Convert tasklet API to new bottom half workqueue mechanism
From: Allen Pais @ 2024-06-21  5:05 UTC
  To: kuba, Shyam Sundar S K, David S. Miller, Eric Dumazet,
	Paolo Abeni
  Cc: jes, kda, cai.huoqing, dougmill, npiggin, christophe.leroy,
	aneesh.kumar, naveen.n.rao, nnac123, tlfalcon, cooldavid,
	marcin.s.wojtas, mlindner, stephen, nbd, sean.wang, Mark-MC.Lee,
	lorenzo, matthias.bgg, angelogioacchino.delregno, borisp,
	bryan.whitehead, UNGLinuxDriver, louis.peens, richardcochran,
	linux-rdma, linux-kernel, linux-acenic, linux-net-drivers,
	Allen Pais, netdev

Migrate the tasklet API to the new bottom half workqueue mechanism,
replacing all occurrences of tasklet usage with the appropriate
workqueue APIs throughout the xgbe driver. This transition ensures
compatibility with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c  | 30 +++++++++++------------
 drivers/net/ethernet/amd/xgbe/xgbe-i2c.c  | 16 ++++++------
 drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 16 ++++++------
 drivers/net/ethernet/amd/xgbe/xgbe-pci.c  |  4 +--
 drivers/net/ethernet/amd/xgbe/xgbe.h      | 10 ++++----
 5 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index c4a4e316683f..5475867708f4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -403,9 +403,9 @@ static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
 	return false;
 }
 
-static void xgbe_ecc_isr_task(struct tasklet_struct *t)
+static void xgbe_ecc_isr_bh_work(struct work_struct *work)
 {
-	struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_ecc);
+	struct xgbe_prv_data *pdata = from_work(pdata, work, ecc_bh_work);
 	unsigned int ecc_isr;
 	bool stop = false;
 
@@ -465,17 +465,17 @@ static irqreturn_t xgbe_ecc_isr(int irq, void *data)
 {
 	struct xgbe_prv_data *pdata = data;
 
-	if (pdata->isr_as_tasklet)
-		tasklet_schedule(&pdata->tasklet_ecc);
+	if (pdata->isr_as_bh_work)
+		queue_work(system_bh_wq, &pdata->ecc_bh_work);
 	else
-		xgbe_ecc_isr_task(&pdata->tasklet_ecc);
+		xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work);
 
 	return IRQ_HANDLED;
 }
 
-static void xgbe_isr_task(struct tasklet_struct *t)
+static void xgbe_isr_bh_work(struct work_struct *work)
 {
-	struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_dev);
+	struct xgbe_prv_data *pdata = from_work(pdata, work, dev_bh_work);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_channel *channel;
 	unsigned int dma_isr, dma_ch_isr;
@@ -582,7 +582,7 @@ static void xgbe_isr_task(struct tasklet_struct *t)
 
 	/* If there is not a separate ECC irq, handle it here */
 	if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
-		xgbe_ecc_isr_task(&pdata->tasklet_ecc);
+		xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work);
 
 	/* If there is not a separate I2C irq, handle it here */
 	if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
@@ -604,10 +604,10 @@ static irqreturn_t xgbe_isr(int irq, void *data)
 {
 	struct xgbe_prv_data *pdata = data;
 
-	if (pdata->isr_as_tasklet)
-		tasklet_schedule(&pdata->tasklet_dev);
+	if (pdata->isr_as_bh_work)
+		queue_work(system_bh_wq, &pdata->dev_bh_work);
 	else
-		xgbe_isr_task(&pdata->tasklet_dev);
+		xgbe_isr_bh_work(&pdata->dev_bh_work);
 
 	return IRQ_HANDLED;
 }
@@ -1007,8 +1007,8 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
 	unsigned int i;
 	int ret;
 
-	tasklet_setup(&pdata->tasklet_dev, xgbe_isr_task);
-	tasklet_setup(&pdata->tasklet_ecc, xgbe_ecc_isr_task);
+	INIT_WORK(&pdata->dev_bh_work, xgbe_isr_bh_work);
+	INIT_WORK(&pdata->ecc_bh_work, xgbe_ecc_isr_bh_work);
 
 	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
 			       netdev_name(netdev), pdata);
@@ -1078,8 +1078,8 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
 
 	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
 
-	tasklet_kill(&pdata->tasklet_dev);
-	tasklet_kill(&pdata->tasklet_ecc);
+	cancel_work_sync(&pdata->dev_bh_work);
+	cancel_work_sync(&pdata->ecc_bh_work);
 
 	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
 		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index a9ccc4258ee5..7a833894f52a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -274,9 +274,9 @@ static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata,
 		XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
 }
 
-static void xgbe_i2c_isr_task(struct tasklet_struct *t)
+static void xgbe_i2c_isr_bh_work(struct work_struct *work)
 {
-	struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_i2c);
+	struct xgbe_prv_data *pdata = from_work(pdata, work, i2c_bh_work);
 	struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
 	unsigned int isr;
 
@@ -321,10 +321,10 @@ static irqreturn_t xgbe_i2c_isr(int irq, void *data)
 {
 	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
 
-	if (pdata->isr_as_tasklet)
-		tasklet_schedule(&pdata->tasklet_i2c);
+	if (pdata->isr_as_bh_work)
+		queue_work(system_bh_wq, &pdata->i2c_bh_work);
 	else
-		xgbe_i2c_isr_task(&pdata->tasklet_i2c);
+		xgbe_i2c_isr_bh_work(&pdata->i2c_bh_work);
 
 	return IRQ_HANDLED;
 }
@@ -369,7 +369,7 @@ static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr)
 
 static irqreturn_t xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata)
 {
-	xgbe_i2c_isr_task(&pdata->tasklet_i2c);
+	xgbe_i2c_isr_bh_work(&pdata->i2c_bh_work);
 
 	return IRQ_HANDLED;
 }
@@ -449,7 +449,7 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
 
 	if (pdata->dev_irq != pdata->i2c_irq) {
 		devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
-		tasklet_kill(&pdata->tasklet_i2c);
+		cancel_work_sync(&pdata->i2c_bh_work);
 	}
 }
 
@@ -464,7 +464,7 @@ static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
 
 	/* If we have a separate I2C irq, enable it */
 	if (pdata->dev_irq != pdata->i2c_irq) {
-		tasklet_setup(&pdata->tasklet_i2c, xgbe_i2c_isr_task);
+		INIT_WORK(&pdata->i2c_bh_work, xgbe_i2c_isr_bh_work);
 
 		ret = devm_request_irq(pdata->dev, pdata->i2c_irq,
 				       xgbe_i2c_isr, 0, pdata->i2c_name,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 4a2dc705b528..07f4f3418d01 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -703,9 +703,9 @@ static void xgbe_an73_isr(struct xgbe_prv_data *pdata)
 	}
 }
 
-static void xgbe_an_isr_task(struct tasklet_struct *t)
+static void xgbe_an_isr_bh_work(struct work_struct *work)
 {
-	struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_an);
+	struct xgbe_prv_data *pdata = from_work(pdata, work, an_bh_work);
 
 	netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
 
@@ -727,17 +727,17 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
 {
 	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
 
-	if (pdata->isr_as_tasklet)
-		tasklet_schedule(&pdata->tasklet_an);
+	if (pdata->isr_as_bh_work)
+		queue_work(system_bh_wq, &pdata->an_bh_work);
 	else
-		xgbe_an_isr_task(&pdata->tasklet_an);
+		xgbe_an_isr_bh_work(&pdata->an_bh_work);
 
 	return IRQ_HANDLED;
 }
 
 static irqreturn_t xgbe_an_combined_isr(struct xgbe_prv_data *pdata)
 {
-	xgbe_an_isr_task(&pdata->tasklet_an);
+	xgbe_an_isr_bh_work(&pdata->an_bh_work);
 
 	return IRQ_HANDLED;
 }
@@ -1454,7 +1454,7 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
 
 	if (pdata->dev_irq != pdata->an_irq) {
 		devm_free_irq(pdata->dev, pdata->an_irq, pdata);
-		tasklet_kill(&pdata->tasklet_an);
+		cancel_work_sync(&pdata->an_bh_work);
 	}
 
 	pdata->phy_if.phy_impl.stop(pdata);
@@ -1477,7 +1477,7 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
 
 	/* If we have a separate AN irq, enable it */
 	if (pdata->dev_irq != pdata->an_irq) {
-		tasklet_setup(&pdata->tasklet_an, xgbe_an_isr_task);
+		INIT_WORK(&pdata->an_bh_work, xgbe_an_isr_bh_work);
 
 		ret = devm_request_irq(pdata->dev, pdata->an_irq,
 				       xgbe_an_isr, 0, pdata->an_name,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index c5e5fac49779..c636999a6a84 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -139,7 +139,7 @@ static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata)
 		return ret;
 	}
 
-	pdata->isr_as_tasklet = 1;
+	pdata->isr_as_bh_work = 1;
 	pdata->irq_count = ret;
 
 	pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
@@ -176,7 +176,7 @@ static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
 		return ret;
 	}
 
-	pdata->isr_as_tasklet = pdata->pcidev->msi_enabled ? 1 : 0;
+	pdata->isr_as_bh_work = pdata->pcidev->msi_enabled ? 1 : 0;
 	pdata->irq_count = 1;
 	pdata->channel_irq_count = 1;
 
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f01a1e566da6..d85386cac8d1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -1298,11 +1298,11 @@ struct xgbe_prv_data {
 
 	unsigned int lpm_ctrl;		/* CTRL1 for resume */
 
-	unsigned int isr_as_tasklet;
-	struct tasklet_struct tasklet_dev;
-	struct tasklet_struct tasklet_ecc;
-	struct tasklet_struct tasklet_i2c;
-	struct tasklet_struct tasklet_an;
+	unsigned int isr_as_bh_work;
+	struct work_struct dev_bh_work;
+	struct work_struct ecc_bh_work;
+	struct work_struct i2c_bh_work;
+	struct work_struct an_bh_work;
 
 	struct dentry *xgbe_debugfs;
 
-- 
2.34.1
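
One detail worth noting in the xgbe conversion: when MSI is not enabled
(isr_as_bh_work is clear), the hard IRQ handler calls the work function
directly instead of queueing it, just as the old code invoked the
tasklet function inline. A hedged sketch of that shape, with invented
foo_* names standing in for the driver's own:

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    struct foo_priv {
            unsigned int isr_as_bh_work;
            struct work_struct dev_bh_work;
    };

    static void foo_isr_bh_work(struct work_struct *work)
    {
            struct foo_priv *priv = from_work(priv, work, dev_bh_work);

            /* read and handle the device interrupt status here */
    }

    static irqreturn_t foo_isr(int irq, void *data)
    {
            struct foo_priv *priv = data;

            if (priv->isr_as_bh_work)
                    /* MSI/MSI-X: defer to softirq via the BH workqueue */
                    queue_work(system_bh_wq, &priv->dev_bh_work);
            else
                    /* legacy interrupt: run the handler inline */
                    foo_isr_bh_work(&priv->dev_bh_work);

            return IRQ_HANDLED;
    }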



* [PATCH 03/15] net: cnic: Convert tasklet API to new bottom half workqueue mechanism
From: Allen Pais @ 2024-06-21  5:05 UTC
  To: kuba, David S. Miller, Eric Dumazet, Paolo Abeni
  Cc: jes, kda, cai.huoqing, dougmill, npiggin, christophe.leroy,
	aneesh.kumar, naveen.n.rao, nnac123, tlfalcon, cooldavid,
	marcin.s.wojtas, mlindner, stephen, nbd, sean.wang, Mark-MC.Lee,
	lorenzo, matthias.bgg, angelogioacchino.delregno, borisp,
	bryan.whitehead, UNGLinuxDriver, louis.peens, richardcochran,
	linux-rdma, linux-kernel, linux-acenic, linux-net-drivers,
	Allen Pais, netdev

Migrate the tasklet API to the new bottom half workqueue mechanism,
replacing all occurrences of tasklet usage with the appropriate
workqueue APIs throughout the cnic driver. This transition ensures
compatibility with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 drivers/net/ethernet/broadcom/cnic.c | 19 ++++++++++---------
 drivers/net/ethernet/broadcom/cnic.h |  2 +-
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index c2b4188a1ef1..a9040c42d2ff 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -31,6 +31,7 @@
 #include <linux/if_vlan.h>
 #include <linux/prefetch.h>
 #include <linux/random.h>
+#include <linux/workqueue.h>
 #if IS_ENABLED(CONFIG_VLAN_8021Q)
 #define BCM_VLAN 1
 #endif
@@ -3015,9 +3016,9 @@ static int cnic_service_bnx2(void *data, void *status_blk)
 	return cnic_service_bnx2_queues(dev);
 }
 
-static void cnic_service_bnx2_msix(struct tasklet_struct *t)
+static void cnic_service_bnx2_msix(struct work_struct *work)
 {
-	struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+	struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
 	struct cnic_dev *dev = cp->dev;
 
 	cp->last_status_idx = cnic_service_bnx2_queues(dev);
@@ -3036,7 +3037,7 @@ static void cnic_doirq(struct cnic_dev *dev)
 		prefetch(cp->status_blk.gen);
 		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
 
-		tasklet_schedule(&cp->cnic_irq_task);
+		queue_work(system_bh_wq, &cp->cnic_irq_bh_work);
 	}
 }
 
@@ -3140,9 +3141,9 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
 	return last_status;
 }
 
-static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
+static void cnic_service_bnx2x_bh_work(struct work_struct *work)
 {
-	struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+	struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
 	struct cnic_dev *dev = cp->dev;
 	struct bnx2x *bp = netdev_priv(dev->netdev);
 	u32 status_idx, new_status_idx;
@@ -4428,7 +4429,7 @@ static void cnic_free_irq(struct cnic_dev *dev)
 
 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
 		cp->disable_int_sync(dev);
-		tasklet_kill(&cp->cnic_irq_task);
+		cancel_work_sync(&cp->cnic_irq_bh_work);
 		free_irq(ethdev->irq_arr[0].vector, dev);
 	}
 }
@@ -4441,7 +4442,7 @@ static int cnic_request_irq(struct cnic_dev *dev)
 
 	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
 	if (err)
-		tasklet_disable(&cp->cnic_irq_task);
+		disable_work_sync(&cp->cnic_irq_bh_work);
 
 	return err;
 }
@@ -4464,7 +4465,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
 		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
 
 		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
-		tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
+		INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2_msix);
 		err = cnic_request_irq(dev);
 		if (err)
 			return err;
@@ -4873,7 +4874,7 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
 	struct cnic_eth_dev *ethdev = cp->ethdev;
 	int err = 0;
 
-	tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
+	INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2x_bh_work);
 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
 		err = cnic_request_irq(dev);
 
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index fedc84ada937..1a314a75d2d2 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -268,7 +268,7 @@ struct cnic_local {
 	u32				bnx2x_igu_sb_id;
 	u32				int_num;
 	u32				last_status_idx;
-	struct tasklet_struct		cnic_irq_task;
+	struct work_struct		cnic_irq_bh_work;
 
 	struct kcqe		*completed_kcq[MAX_COMPLETED_KCQE];
 
-- 
2.34.1



* [PATCH 04/15] net: macb: Convert tasklet API to new bottom half workqueue mechanism
From: Allen Pais @ 2024-06-21  5:05 UTC
  To: kuba, Nicolas Ferre, Claudiu Beznea, David S. Miller,
	Eric Dumazet, Paolo Abeni
  Cc: jes, kda, cai.huoqing, dougmill, npiggin, christophe.leroy,
	aneesh.kumar, naveen.n.rao, nnac123, tlfalcon, cooldavid,
	marcin.s.wojtas, mlindner, stephen, nbd, sean.wang, Mark-MC.Lee,
	lorenzo, matthias.bgg, angelogioacchino.delregno, borisp,
	bryan.whitehead, UNGLinuxDriver, louis.peens, richardcochran,
	linux-rdma, linux-kernel, linux-acenic, linux-net-drivers,
	Allen Pais, netdev

Migrate the tasklet API to the new bottom half workqueue mechanism,
replacing all occurrences of tasklet usage with the appropriate
workqueue APIs throughout the macb driver. This transition ensures
compatibility with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 drivers/net/ethernet/cadence/macb.h      |  3 ++-
 drivers/net/ethernet/cadence/macb_main.c | 10 +++++-----
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index aa5700ac9c00..e570cad705d2 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -13,6 +13,7 @@
 #include <linux/net_tstamp.h>
 #include <linux/interrupt.h>
 #include <linux/phy/phy.h>
+#include <linux/workqueue.h>
 
 #if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
 #define MACB_EXT_DESC
@@ -1322,7 +1323,7 @@ struct macb {
 	spinlock_t rx_fs_lock;
 	unsigned int max_tuples;
 
-	struct tasklet_struct	hresp_err_tasklet;
+	struct work_struct	hresp_err_bh_work;
 
 	int	rx_bd_rd_prefetch;
 	int	tx_bd_rd_prefetch;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 241ce9a2fa99..0dc21a9ae215 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1792,9 +1792,9 @@ static int macb_tx_poll(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
-static void macb_hresp_error_task(struct tasklet_struct *t)
+static void macb_hresp_error_task(struct work_struct *work)
 {
-	struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
+	struct macb *bp = from_work(bp, work, hresp_err_bh_work);
 	struct net_device *dev = bp->dev;
 	struct macb_queue *queue;
 	unsigned int q;
@@ -1994,7 +1994,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 		}
 
 		if (status & MACB_BIT(HRESP)) {
-			tasklet_schedule(&bp->hresp_err_tasklet);
+			queue_work(system_bh_wq, &bp->hresp_err_bh_work);
 			netdev_err(dev, "DMA bus error: HRESP not OK\n");
 
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -5150,7 +5150,7 @@ static int macb_probe(struct platform_device *pdev)
 		goto err_out_unregister_mdio;
 	}
 
-	tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);
+	INIT_WORK(&bp->hresp_err_bh_work, macb_hresp_error_task);
 
 	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
 		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
@@ -5194,7 +5194,7 @@ static void macb_remove(struct platform_device *pdev)
 		mdiobus_free(bp->mii_bus);
 
 		unregister_netdev(dev);
-		tasklet_kill(&bp->hresp_err_tasklet);
+		cancel_work_sync(&bp->hresp_err_bh_work);
 		pm_runtime_disable(&pdev->dev);
 		pm_runtime_dont_use_autosuspend(&pdev->dev);
 		if (!pm_runtime_suspended(&pdev->dev)) {
-- 
2.34.1



* [PATCH 05/15] net: cavium/liquidio: Convert tasklet API to new bottom half workqueue mechanism
From: Allen Pais @ 2024-06-21  5:05 UTC
  To: kuba, David S. Miller, Eric Dumazet, Paolo Abeni
  Cc: jes, kda, cai.huoqing, dougmill, npiggin, christophe.leroy,
	aneesh.kumar, naveen.n.rao, nnac123, tlfalcon, cooldavid,
	marcin.s.wojtas, mlindner, stephen, nbd, sean.wang, Mark-MC.Lee,
	lorenzo, matthias.bgg, angelogioacchino.delregno, borisp,
	bryan.whitehead, UNGLinuxDriver, louis.peens, richardcochran,
	linux-rdma, linux-kernel, linux-acenic, linux-net-drivers,
	Allen Pais, netdev

Migrate the tasklet API to the new bottom half workqueue mechanism,
replacing all occurrences of tasklet usage with the appropriate
workqueue APIs throughout the cavium/liquidio driver. This transition
ensures compatibility with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 .../net/ethernet/cavium/liquidio/lio_core.c   |  4 ++--
 .../net/ethernet/cavium/liquidio/lio_main.c   | 24 +++++++++----------
 .../ethernet/cavium/liquidio/lio_vf_main.c    | 10 ++++----
 .../ethernet/cavium/liquidio/octeon_droq.c    |  4 ++--
 .../ethernet/cavium/liquidio/octeon_main.h    |  4 ++--
 5 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 674c54831875..37307e02a6ff 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -925,7 +925,7 @@ int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
 			if (OCTEON_CN23XX_VF(oct))
 				dev_err(&oct->pci_dev->dev,
 					"should not come here should not get rx when poll mode = 0 for vf\n");
-			tasklet_schedule(&oct_priv->droq_tasklet);
+			queue_work(system_bh_wq, &oct_priv->droq_bh_work);
 			return 1;
 		}
 		/* this will be flushed periodically by check iq db */
@@ -975,7 +975,7 @@ static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
 				droq->ops.napi_fn(droq);
 				oct_priv->napi_mask |= BIT_ULL(oq_no);
 			} else {
-				tasklet_schedule(&oct_priv->droq_tasklet);
+				queue_work(system_bh_wq, &oct_priv->droq_bh_work);
 			}
 		}
 	}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 1d79f6eaa41f..d348656c2f38 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -150,12 +150,12 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
 static struct handshake handshake[MAX_OCTEON_DEVICES];
 static struct completion first_stage;
 
-static void octeon_droq_bh(struct tasklet_struct *t)
+static void octeon_droq_bh(struct work_struct *work)
 {
 	int q_no;
 	int reschedule = 0;
-	struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t,
-							  droq_tasklet);
+	struct octeon_device_priv *oct_priv = from_work(oct_priv, work,
+							  droq_bh_work);
 	struct octeon_device *oct = oct_priv->dev;
 
 	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
@@ -180,7 +180,7 @@ static void octeon_droq_bh(struct tasklet_struct *t)
 	}
 
 	if (reschedule)
-		tasklet_schedule(&oct_priv->droq_tasklet);
+		queue_work(system_bh_wq, &oct_priv->droq_bh_work);
 }
 
 static int lio_wait_for_oq_pkts(struct octeon_device *oct)
@@ -199,7 +199,7 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
 		}
 		if (pkt_cnt > 0) {
 			pending_pkts += pkt_cnt;
-			tasklet_schedule(&oct_priv->droq_tasklet);
+			queue_work(system_bh_wq, &oct_priv->droq_bh_work);
 		}
 		pkt_cnt = 0;
 		schedule_timeout_uninterruptible(1);
@@ -1130,7 +1130,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 		break;
 	}                       /* end switch (oct->status) */
 
-	tasklet_kill(&oct_priv->droq_tasklet);
+	cancel_work_sync(&oct_priv->droq_bh_work);
 }
 
 /**
@@ -1234,7 +1234,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
 	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
 		netif_napi_del(napi);
 
-	tasklet_enable(&oct_priv->droq_tasklet);
+	enable_and_queue_work(system_bh_wq, &oct_priv->droq_bh_work);
 
 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
 		unregister_netdev(netdev);
@@ -1770,7 +1770,7 @@ static int liquidio_open(struct net_device *netdev)
 	int ret = 0;
 
 	if (oct->props[lio->ifidx].napi_enabled == 0) {
-		tasklet_disable(&oct_priv->droq_tasklet);
+		disable_work_sync(&oct_priv->droq_bh_work);
 
 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
 			napi_enable(napi);
@@ -1896,7 +1896,7 @@ static int liquidio_stop(struct net_device *netdev)
 		if (OCTEON_CN23XX_PF(oct))
 			oct->droq[0]->ops.poll_mode = 0;
 
-		tasklet_enable(&oct_priv->droq_tasklet);
+		enable_and_queue_work(system_bh_wq, &oct_priv->droq_bh_work);
 	}
 
 	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
@@ -4204,9 +4204,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
 		}
 	}
 
-	/* Initialize the tasklet that handles output queue packet processing.*/
-	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
-	tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh);
+	/* Initialize the bh work that handles output queue packet processing.*/
+	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq bh work\n");
+	INIT_WORK(&oct_priv->droq_bh_work, octeon_droq_bh);
 
 	/* Setup the interrupt handler and record the INT SUM register address
 	 */
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 62c2eadc33e3..04117625f388 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -87,7 +87,7 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
 		}
 		if (pkt_cnt > 0) {
 			pending_pkts += pkt_cnt;
-			tasklet_schedule(&oct_priv->droq_tasklet);
+			queue_work(system_bh_wq, &oct_priv->droq_bh_work);
 		}
 		pkt_cnt = 0;
 		schedule_timeout_uninterruptible(1);
@@ -584,7 +584,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 		break;
 	}
 
-	tasklet_kill(&oct_priv->droq_tasklet);
+	cancel_work_sync(&oct_priv->droq_bh_work);
 }
 
 /**
@@ -687,7 +687,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
 	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
 		netif_napi_del(napi);
 
-	tasklet_enable(&oct_priv->droq_tasklet);
+	enable_and_queue_work(system_bh_wq, &oct_priv->droq_bh_work);
 
 	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
 		unregister_netdev(netdev);
@@ -911,7 +911,7 @@ static int liquidio_open(struct net_device *netdev)
 	int ret = 0;
 
 	if (!oct->props[lio->ifidx].napi_enabled) {
-		tasklet_disable(&oct_priv->droq_tasklet);
+		disable_work_sync(&oct_priv->droq_bh_work);
 
 		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
 			napi_enable(napi);
@@ -986,7 +986,7 @@ static int liquidio_stop(struct net_device *netdev)
 
 		oct->droq[0]->ops.poll_mode = 0;
 
-		tasklet_enable(&oct_priv->droq_tasklet);
+		enable_and_queue_work(system_bh_wq, &oct_priv->droq_bh_work);
 	}
 
 	cancel_delayed_work_sync(&lio->stats_wk.work);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index eef12fdd246d..4e5f8bbc891b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -96,7 +96,7 @@ u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
 	last_count = pkt_count - droq->pkt_count;
 	droq->pkt_count = pkt_count;
 
-	/* we shall write to cnts  at napi irq enable or end of droq tasklet */
+	/* we shall write to cnts  at napi irq enable or end of droq bh_work */
 	if (last_count)
 		atomic_add(last_count, &droq->pkts_pending);
 
@@ -764,7 +764,7 @@ octeon_droq_process_packets(struct octeon_device *oct,
 				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
 	}
 
-	/* If there are packets pending. schedule tasklet again */
+	/* If there are packets pending. schedule bh_work again */
 	if (atomic_read(&droq->pkts_pending))
 		return 1;
 
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index 5b4cb725f60f..a8f2a0a7b08e 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -24,6 +24,7 @@
 #define  _OCTEON_MAIN_H_
 
 #include <linux/sched/signal.h>
+#include <linux/workqueue.h>
 
 #if BITS_PER_LONG == 32
 #define CVM_CAST64(v) ((long long)(v))
@@ -36,8 +37,7 @@
 #define DRV_NAME "LiquidIO"
 
 struct octeon_device_priv {
-	/** Tasklet structures for this device. */
-	struct tasklet_struct droq_tasklet;
+	struct work_struct droq_bh_work;
 	unsigned long napi_mask;
 	struct octeon_device *dev;
 };
-- 
2.34.1
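
The liquidio conversion also exercises the disable/enable side of the
new API: tasklet_disable()/tasklet_enable() become
disable_work_sync()/enable_and_queue_work(). Unlike tasklet_enable(),
enable_and_queue_work() also queues the work item when it re-enables
it, which here serves to pick up anything that arrived while the
bottom half was disabled. A rough sketch under that assumption, with
an invented bar_priv structure:

    #include <linux/workqueue.h>

    struct bar_priv {
            struct work_struct droq_bh_work;
    };

    /* pause bottom half processing, e.g. while NAPI takes over */
    static void bar_pause_bh(struct bar_priv *priv)
    {
            /* was: tasklet_disable(&priv->droq_tasklet) */
            disable_work_sync(&priv->droq_bh_work);
    }

    /* resume bottom half processing and run it once to catch up */
    static void bar_resume_bh(struct bar_priv *priv)
    {
            /* was: tasklet_enable(&priv->droq_tasklet) */
            enable_and_queue_work(system_bh_wq, &priv->droq_bh_work);
    }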



* [PATCH 06/15] net: octeon: Convert tasklet API to new bottom half workqueue mechanism
From: Allen Pais @ 2024-06-21  5:05 UTC
  To: kuba, David S. Miller, Eric Dumazet, Paolo Abeni
  Cc: jes, kda, cai.huoqing, dougmill, npiggin, christophe.leroy,
	aneesh.kumar, naveen.n.rao, nnac123, tlfalcon, cooldavid,
	marcin.s.wojtas, mlindner, stephen, nbd, sean.wang, Mark-MC.Lee,
	lorenzo, matthias.bgg, angelogioacchino.delregno, borisp,
	bryan.whitehead, UNGLinuxDriver, louis.peens, richardcochran,
	linux-rdma, linux-kernel, linux-acenic, linux-net-drivers,
	Allen Pais, netdev

Migrate the tasklet API to the new bottom half workqueue mechanism,
replacing all occurrences of tasklet usage with the appropriate
workqueue APIs throughout the cavium/octeon driver. This transition
ensures compatibility with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 744f2434f7fa..0db993c1cc36 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -13,6 +13,7 @@
 #include <linux/net_tstamp.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
+#include <linux/workqueue.h>
 #include <linux/spinlock.h>
 #include <linux/if_vlan.h>
 #include <linux/of_mdio.h>
@@ -144,7 +145,7 @@ struct octeon_mgmt {
 	unsigned int last_speed;
 	struct device *dev;
 	struct napi_struct napi;
-	struct tasklet_struct tx_clean_tasklet;
+	struct work_struct tx_clean_bh_work;
 	struct device_node *phy_np;
 	resource_size_t mix_phys;
 	resource_size_t mix_size;
@@ -315,9 +316,9 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
 		netif_wake_queue(p->netdev);
 }
 
-static void octeon_mgmt_clean_tx_tasklet(struct tasklet_struct *t)
+static void octeon_mgmt_clean_tx_bh_work(struct work_struct *work)
 {
-	struct octeon_mgmt *p = from_tasklet(p, t, tx_clean_tasklet);
+	struct octeon_mgmt *p = from_work(p, work, tx_clean_bh_work);
 	octeon_mgmt_clean_tx_buffers(p);
 	octeon_mgmt_enable_tx_irq(p);
 }
@@ -684,7 +685,7 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
 	}
 	if (mixx_isr.s.orthresh) {
 		octeon_mgmt_disable_tx_irq(p);
-		tasklet_schedule(&p->tx_clean_tasklet);
+		queue_work(system_bh_wq, &p->tx_clean_bh_work);
 	}
 
 	return IRQ_HANDLED;
@@ -1487,8 +1488,8 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
 
 	skb_queue_head_init(&p->tx_list);
 	skb_queue_head_init(&p->rx_list);
-	tasklet_setup(&p->tx_clean_tasklet,
-		      octeon_mgmt_clean_tx_tasklet);
+	INIT_WORK(&p->tx_clean_bh_work,
+		  octeon_mgmt_clean_tx_bh_work);
 
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 
-- 
2.34.1



* [PATCH 07/15] net: thunderx: Convert tasklet API to new bottom half workqueue mechanism
From: Allen Pais @ 2024-06-21  5:05 UTC
  To: kuba, Sunil Goutham, David S. Miller, Eric Dumazet, Paolo Abeni
  Cc: jes, kda, cai.huoqing, dougmill, npiggin, christophe.leroy,
	aneesh.kumar, naveen.n.rao, nnac123, tlfalcon, cooldavid,
	marcin.s.wojtas, mlindner, stephen, nbd, sean.wang, Mark-MC.Lee,
	lorenzo, matthias.bgg, angelogioacchino.delregno, borisp,
	bryan.whitehead, UNGLinuxDriver, louis.peens, richardcochran,
	linux-rdma, linux-kernel, linux-acenic, linux-net-drivers,
	Allen Pais, linux-arm-kernel, netdev

Migrate the tasklet API to the new bottom half workqueue mechanism,
replacing all occurrences of tasklet usage with the appropriate
workqueue APIs throughout the cavium/thunderx driver. This transition
ensures compatibility with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 drivers/net/ethernet/cavium/thunder/nic.h     |  5 ++--
 .../net/ethernet/cavium/thunder/nicvf_main.c  | 24 +++++++++----------
 .../ethernet/cavium/thunder/nicvf_queues.c    |  4 ++--
 .../ethernet/cavium/thunder/nicvf_queues.h    |  2 +-
 4 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 090d6b83982a..ecc175b6e7fa 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -8,6 +8,7 @@
 
 #include <linux/netdevice.h>
 #include <linux/interrupt.h>
+#include <linux/workqueue.h>
 #include <linux/pci.h>
 #include "thunder_bgx.h"
 
@@ -295,7 +296,7 @@ struct nicvf {
 	bool			rb_work_scheduled;
 	struct page		*rb_page;
 	struct delayed_work	rbdr_work;
-	struct tasklet_struct	rbdr_task;
+	struct work_struct	rbdr_bh_work;
 
 	/* Secondary Qset */
 	u8			sqs_count;
@@ -319,7 +320,7 @@ struct nicvf {
 	bool			loopback_supported;
 	struct nicvf_rss_info	rss_info;
 	struct nicvf_pfc	pfc;
-	struct tasklet_struct	qs_err_task;
+	struct work_struct	qs_err_bh_work;
 	struct work_struct	reset_task;
 	struct nicvf_work       rx_mode_work;
 	/* spinlock to protect workqueue arguments from concurrent access */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index aebb9fef3f6e..b0878bd25cf0 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -982,9 +982,9 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
  *
  * As of now only CQ errors are handled
  */
-static void nicvf_handle_qs_err(struct tasklet_struct *t)
+static void nicvf_handle_qs_err(struct work_struct *work)
 {
-	struct nicvf *nic = from_tasklet(nic, t, qs_err_task);
+	struct nicvf *nic = from_work(nic, work, qs_err_bh_work);
 	struct queue_set *qs = nic->qs;
 	int qidx;
 	u64 status;
@@ -1069,7 +1069,7 @@ static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
 		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
 			continue;
 		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
-		tasklet_hi_schedule(&nic->rbdr_task);
+		queue_work(system_bh_highpri_wq, &nic->rbdr_bh_work);
 		/* Clear interrupt */
 		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
 	}
@@ -1085,7 +1085,7 @@ static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
 
 	/* Disable Qset err interrupt and schedule softirq */
 	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
-	tasklet_hi_schedule(&nic->qs_err_task);
+	queue_work(system_bh_highpri_wq, &nic->qs_err_bh_work);
 	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
 
 	return IRQ_HANDLED;
@@ -1364,8 +1364,8 @@ int nicvf_stop(struct net_device *netdev)
 	for (irq = 0; irq < nic->num_vec; irq++)
 		synchronize_irq(pci_irq_vector(nic->pdev, irq));
 
-	tasklet_kill(&nic->rbdr_task);
-	tasklet_kill(&nic->qs_err_task);
+	cancel_work_sync(&nic->rbdr_bh_work);
+	cancel_work_sync(&nic->qs_err_bh_work);
 	if (nic->rb_work_scheduled)
 		cancel_delayed_work_sync(&nic->rbdr_work);
 
@@ -1488,11 +1488,11 @@ int nicvf_open(struct net_device *netdev)
 		nicvf_hw_set_mac_addr(nic, netdev);
 	}
 
-	/* Init tasklet for handling Qset err interrupt */
-	tasklet_setup(&nic->qs_err_task, nicvf_handle_qs_err);
+	/* Init bh_work for handling Qset err interrupt */
+	INIT_WORK(&nic->qs_err_bh_work, nicvf_handle_qs_err);
 
-	/* Init RBDR tasklet which will refill RBDR */
-	tasklet_setup(&nic->rbdr_task, nicvf_rbdr_task);
+	/* Init RBDR bh_work which will refill RBDR */
+	INIT_WORK(&nic->rbdr_bh_work, nicvf_rbdr_bh_work);
 	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
 
 	/* Configure CPI alorithm */
@@ -1561,8 +1561,8 @@ int nicvf_open(struct net_device *netdev)
 cleanup:
 	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
 	nicvf_unregister_interrupts(nic);
-	tasklet_kill(&nic->qs_err_task);
-	tasklet_kill(&nic->rbdr_task);
+	cancel_work_sync(&nic->qs_err_bh_work);
+	cancel_work_sync(&nic->rbdr_bh_work);
 napi_del:
 	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
 		cq_poll = nic->napi[qidx];
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 06397cc8bb36..ad71160879e4 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -461,9 +461,9 @@ void nicvf_rbdr_work(struct work_struct *work)
 }
 
 /* In Softirq context, alloc rcv buffers in atomic mode */
-void nicvf_rbdr_task(struct tasklet_struct *t)
+void nicvf_rbdr_bh_work(struct work_struct *work)
 {
-	struct nicvf *nic = from_tasklet(nic, t, rbdr_task);
+	struct nicvf *nic = from_work(nic, work, rbdr_bh_work);
 
 	nicvf_refill_rbdr(nic, GFP_ATOMIC);
 	if (nic->rb_alloc_fail) {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 8453defc296c..c6f18fb7c50e 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -348,7 +348,7 @@ void nicvf_xdp_sq_doorbell(struct nicvf *nic, struct snd_queue *sq, int sq_num);
 
 struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
 				  struct cqe_rx_t *cqe_rx, bool xdp);
-void nicvf_rbdr_task(struct tasklet_struct *t);
+void nicvf_rbdr_bh_work(struct work_struct *work);
 void nicvf_rbdr_work(struct work_struct *work);
 
 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
-- 
2.34.1
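
thunderx used tasklet_hi_schedule() for its RBDR and Qset error
handlers; on the workqueue side the equivalent is queueing onto
system_bh_highpri_wq rather than system_bh_wq. A minimal sketch, with
an invented baz_priv structure:

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    struct baz_priv {
            struct work_struct err_bh_work;
    };

    static irqreturn_t baz_err_intr(int irq, void *data)
    {
            struct baz_priv *priv = data;

            /* was: tasklet_hi_schedule(&priv->err_task) */
            queue_work(system_bh_highpri_wq, &priv->err_bh_work);
            return IRQ_HANDLED;
    }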



* [PATCH 08/15] net: chelsio: Convert tasklet API to new bottom half workqueue mechanism
From: Allen Pais @ 2024-06-21  5:05 UTC
  To: kuba, David S. Miller, Eric Dumazet, Paolo Abeni,
	Potnuri Bharat Teja
  Cc: jes, kda, cai.huoqing, dougmill, npiggin, christophe.leroy,
	aneesh.kumar, naveen.n.rao, nnac123, tlfalcon, cooldavid,
	marcin.s.wojtas, mlindner, stephen, nbd, sean.wang, Mark-MC.Lee,
	lorenzo, matthias.bgg, angelogioacchino.delregno, borisp,
	bryan.whitehead, UNGLinuxDriver, louis.peens, richardcochran,
	linux-rdma, linux-kernel, linux-acenic, linux-net-drivers,
	Allen Pais, netdev

Migrate the tasklet API to the new bottom half workqueue mechanism,
replacing all occurrences of tasklet usage with the appropriate
workqueue APIs throughout the chelsio driver. This transition ensures
compatibility with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 drivers/net/ethernet/chelsio/cxgb/sge.c       | 19 ++++-----
 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h    |  9 +++--
 .../net/ethernet/chelsio/cxgb4/cxgb4_main.c   |  2 +-
 .../ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c  |  4 +-
 .../net/ethernet/chelsio/cxgb4/cxgb4_uld.c    |  2 +-
 drivers/net/ethernet/chelsio/cxgb4/sge.c      | 40 +++++++++----------
 drivers/net/ethernet/chelsio/cxgb4vf/sge.c    |  6 +--
 7 files changed, 42 insertions(+), 40 deletions(-)

diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 861edff5ed89..4dab9b0dca86 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -229,11 +229,11 @@ struct sched {
 	unsigned int	port;		/* port index (round robin ports) */
 	unsigned int	num;		/* num skbs in per port queues */
 	struct sched_port p[MAX_NPORTS];
-	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
+	struct work_struct sched_bh_work;/* bh_work used to run scheduler */
 	struct sge *sge;
 };
 
-static void restart_sched(struct tasklet_struct *t);
+static void restart_sched(struct work_struct *work);
 
 
 /*
@@ -270,14 +270,14 @@ static const u8 ch_mac_addr[ETH_ALEN] = {
 };
 
 /*
- * stop tasklet and free all pending skb's
+ * stop bh_work and free all pending skb's
  */
 static void tx_sched_stop(struct sge *sge)
 {
 	struct sched *s = sge->tx_sched;
 	int i;
 
-	tasklet_kill(&s->sched_tsk);
+	cancel_work_sync(&s->sched_bh_work);
 
 	for (i = 0; i < MAX_NPORTS; i++)
 		__skb_queue_purge(&s->p[s->port].skbq);
@@ -371,7 +371,7 @@ static int tx_sched_init(struct sge *sge)
 		return -ENOMEM;
 
 	pr_debug("tx_sched_init\n");
-	tasklet_setup(&s->sched_tsk, restart_sched);
+	INIT_WORK(&s->sched_bh_work, restart_sched);
 	s->sge = sge;
 	sge->tx_sched = s;
 
@@ -1300,12 +1300,12 @@ static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
 }
 
 /*
- * Called from tasklet. Checks the scheduler for any
+ * Called from bh context. Checks the scheduler for any
  * pending skbs that can be sent.
  */
-static void restart_sched(struct tasklet_struct *t)
+static void restart_sched(struct work_struct *work)
 {
-	struct sched *s = from_tasklet(s, t, sched_tsk);
+	struct sched *s = from_work(s, work, sched_bh_work);
 	struct sge *sge = s->sge;
 	struct adapter *adapter = sge->adapter;
 	struct cmdQ *q = &sge->cmdQ[0];
@@ -1451,7 +1451,8 @@ static unsigned int update_tx_info(struct adapter *adapter,
 			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
 		}
 		if (sge->tx_sched)
-			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
+			queue_work(system_bh_highpri_wq,
+				   &sge->tx_sched->sched_bh_work);
 
 		flags &= ~F_CMDQ0_ENABLE;
 	}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index fca9533bc011..846040f5e638 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -53,6 +53,7 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/ptp_classify.h>
 #include <linux/crash_dump.h>
+#include <linux/workqueue.h>
 #include <linux/thermal.h>
 #include <asm/io.h>
 #include "t4_chip_type.h"
@@ -880,7 +881,7 @@ struct sge_uld_txq {               /* state for an SGE offload Tx queue */
 	struct sge_txq q;
 	struct adapter *adap;
 	struct sk_buff_head sendq;  /* list of backpressured packets */
-	struct tasklet_struct qresume_tsk; /* restarts the queue */
+	struct work_struct qresume_bh_work; /* restarts the queue */
 	bool service_ofldq_running; /* service_ofldq() is processing sendq */
 	u8 full;                    /* the Tx ring is full */
 	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
@@ -890,7 +891,7 @@ struct sge_ctrl_txq {               /* state for an SGE control Tx queue */
 	struct sge_txq q;
 	struct adapter *adap;
 	struct sk_buff_head sendq;  /* list of backpressured packets */
-	struct tasklet_struct qresume_tsk; /* restarts the queue */
+	struct work_struct qresume_bh_work; /* restarts the queue */
 	u8 full;                    /* the Tx ring is full */
 } ____cacheline_aligned_in_smp;
 
@@ -946,7 +947,7 @@ struct sge_eosw_txq {
 
 	u32 hwqid; /* Underlying hardware queue index */
 	struct net_device *netdev; /* Pointer to netdevice */
-	struct tasklet_struct qresume_tsk; /* Restarts the queue */
+	struct work_struct qresume_bh_work; /* Restarts the queue */
 	struct completion completion; /* completion for FLOWC rendezvous */
 };
 
@@ -2107,7 +2108,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q,
 void cxgb4_eosw_txq_free_desc(struct adapter *adap, struct sge_eosw_txq *txq,
 			      u32 ndesc);
 int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc);
-void cxgb4_ethofld_restart(struct tasklet_struct *t);
+void cxgb4_ethofld_restart(struct work_struct *work);
 int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
 			     const struct pkt_gl *si);
 void free_txq(struct adapter *adap, struct sge_txq *q);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 2418645c8823..179517e90da7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -589,7 +589,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
 			struct sge_uld_txq *oq;
 
 			oq = container_of(txq, struct sge_uld_txq, q);
-			tasklet_schedule(&oq->qresume_tsk);
+			queue_work(system_bh_wq, &oq->qresume_bh_work);
 		}
 	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
 		const struct cpl_fw6_msg *p = (void *)rsp;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index 338b04f339b3..c165d3393e6e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -114,7 +114,7 @@ static int cxgb4_init_eosw_txq(struct net_device *dev,
 	eosw_txq->cred = adap->params.ofldq_wr_cred;
 	eosw_txq->hwqid = hwqid;
 	eosw_txq->netdev = dev;
-	tasklet_setup(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart);
+	INIT_WORK(&eosw_txq->qresume_bh_work, cxgb4_ethofld_restart);
 	return 0;
 }
 
@@ -143,7 +143,7 @@ static void cxgb4_free_eosw_txq(struct net_device *dev,
 	cxgb4_clean_eosw_txq(dev, eosw_txq);
 	kfree(eosw_txq->desc);
 	spin_unlock_bh(&eosw_txq->lock);
-	tasklet_kill(&eosw_txq->qresume_tsk);
+	cancel_work_sync(&eosw_txq->qresume_bh_work);
 }
 
 static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 5c13bcb4550d..d9bdf0b1eb69 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -407,7 +407,7 @@ free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
 		struct sge_uld_txq *txq = &txq_info->uldtxq[i];
 
 		if (txq->q.desc) {
-			tasklet_kill(&txq->qresume_tsk);
+			cancel_work_sync(&txq->qresume_bh_work);
 			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
 					txq->q.cntxt_id);
 			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index de52bcb884c4..d054979ef850 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2769,15 +2769,15 @@ static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
 
 /**
  *	restart_ctrlq - restart a suspended control queue
- *	@t: pointer to the tasklet associated with this handler
+ *	@work: pointer to the work struct associated with this handler
  *
  *	Resumes transmission on a suspended Tx control queue.
  */
-static void restart_ctrlq(struct tasklet_struct *t)
+static void restart_ctrlq(struct work_struct *work)
 {
 	struct sk_buff *skb;
 	unsigned int written = 0;
-	struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk);
+	struct sge_ctrl_txq *q = from_work(q, work, qresume_bh_work);
 
 	spin_lock(&q->sendq.lock);
 	reclaim_completed_tx_imm(&q->q);
@@ -3075,13 +3075,13 @@ static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
 
 /**
  *	restart_ofldq - restart a suspended offload queue
- *	@t: pointer to the tasklet associated with this handler
+ *	@work: pointer to the work struct associated with this handler
  *
  *	Resumes transmission on a suspended Tx offload queue.
  */
-static void restart_ofldq(struct tasklet_struct *t)
+static void restart_ofldq(struct work_struct *work)
 {
-	struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk);
+	struct sge_uld_txq *q = from_work(q, work, qresume_bh_work);
 
 	spin_lock(&q->sendq.lock);
 	q->full = 0;            /* the queue actually is completely empty now */
@@ -4020,10 +4020,10 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
-void cxgb4_ethofld_restart(struct tasklet_struct *t)
+void cxgb4_ethofld_restart(struct work_struct *work)
 {
-	struct sge_eosw_txq *eosw_txq = from_tasklet(eosw_txq, t,
-						     qresume_tsk);
+	struct sge_eosw_txq *eosw_txq = from_work(eosw_txq, work,
+						     qresume_bh_work);
 	int pktcount;
 
 	spin_lock(&eosw_txq->lock);
@@ -4050,7 +4050,7 @@ void cxgb4_ethofld_restart(struct tasklet_struct *t)
  * @si: the gather list of packet fragments
  *
  * Process a ETHOFLD Tx completion. Increment the cidx here, but
- * free up the descriptors in a tasklet later.
+ * free up the descriptors later in bh_work.
  */
 int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
 			     const struct pkt_gl *si)
@@ -4117,10 +4117,10 @@ int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
 
 		spin_unlock(&eosw_txq->lock);
 
-		/* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx,
+		/* Schedule a bh work to reclaim SKBs and restart ETHOFLD Tx,
 		 * if there were packets waiting for completion.
 		 */
-		tasklet_schedule(&eosw_txq->qresume_tsk);
+		queue_work(system_bh_wq, &eosw_txq->qresume_bh_work);
 	}
 
 out_done:
@@ -4279,7 +4279,7 @@ static void sge_tx_timer_cb(struct timer_list *t)
 			struct sge_uld_txq *txq = s->egr_map[id];
 
 			clear_bit(id, s->txq_maperr);
-			tasklet_schedule(&txq->qresume_tsk);
+			queue_work(system_bh_wq, &txq->qresume_bh_work);
 		}
 
 	if (!is_t4(adap->params.chip)) {
@@ -4719,7 +4719,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
 	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
 	txq->adap = adap;
 	skb_queue_head_init(&txq->sendq);
-	tasklet_setup(&txq->qresume_tsk, restart_ctrlq);
+	INIT_WORK(&txq->qresume_bh_work, restart_ctrlq);
 	txq->full = 0;
 	return 0;
 }
@@ -4809,7 +4809,7 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
 	txq->q.q_type = CXGB4_TXQ_ULD;
 	txq->adap = adap;
 	skb_queue_head_init(&txq->sendq);
-	tasklet_setup(&txq->qresume_tsk, restart_ofldq);
+	INIT_WORK(&txq->qresume_bh_work, restart_ofldq);
 	txq->full = 0;
 	txq->mapping_err = 0;
 	return 0;
@@ -4952,7 +4952,7 @@ void t4_free_sge_resources(struct adapter *adap)
 		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
 
 		if (cq->q.desc) {
-			tasklet_kill(&cq->qresume_tsk);
+			cancel_work_sync(&cq->qresume_bh_work);
 			t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
 					cq->q.cntxt_id);
 			__skb_queue_purge(&cq->sendq);
@@ -5002,7 +5002,7 @@ void t4_sge_start(struct adapter *adap)
  *	t4_sge_stop - disable SGE operation
  *	@adap: the adapter
  *
- *	Stop tasklets and timers associated with the DMA engine.  Note that
+ *	Stop bh works and timers associated with the DMA engine.  Note that
  *	this is effective only if measures have been taken to disable any HW
  *	events that may restart them.
  */
@@ -5025,7 +5025,7 @@ void t4_sge_stop(struct adapter *adap)
 
 			for_each_ofldtxq(&adap->sge, i) {
 				if (txq->q.desc)
-					tasklet_kill(&txq->qresume_tsk);
+					cancel_work_sync(&txq->qresume_bh_work);
 			}
 		}
 	}
@@ -5039,7 +5039,7 @@ void t4_sge_stop(struct adapter *adap)
 
 			for_each_ofldtxq(&adap->sge, i) {
 				if (txq->q.desc)
-					tasklet_kill(&txq->qresume_tsk);
+					cancel_work_sync(&txq->qresume_bh_work);
 			}
 		}
 	}
@@ -5048,7 +5048,7 @@ void t4_sge_stop(struct adapter *adap)
 		struct sge_ctrl_txq *cq = &s->ctrlq[i];
 
 		if (cq->q.desc)
-			tasklet_kill(&cq->qresume_tsk);
+			cancel_work_sync(&cq->qresume_bh_work);
 	}
 }
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 5b1d746e6563..1f4628178d28 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2587,7 +2587,7 @@ void t4vf_free_sge_resources(struct adapter *adapter)
  *	t4vf_sge_start - enable SGE operation
  *	@adapter: the adapter
  *
- *	Start tasklets and timers associated with the DMA engine.
+ *	Start bh work and timers associated with the DMA engine.
  */
 void t4vf_sge_start(struct adapter *adapter)
 {
@@ -2600,7 +2600,7 @@ void t4vf_sge_start(struct adapter *adapter)
  *	t4vf_sge_stop - disable SGE operation
  *	@adapter: the adapter
  *
- *	Stop tasklets and timers associated with the DMA engine.  Note that
+ *	Stop bh works and timers associated with the DMA engine.  Note that
  *	this is effective only if measures have been taken to disable any HW
  *	events that may restart them.
  */
@@ -2692,7 +2692,7 @@ int t4vf_sge_init(struct adapter *adapter)
 	s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
 
 	/*
-	 * Set up tasklet timers.
+	 * Set up bh work timers.
 	 */
 	timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
 	timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 25+ messages in thread

* [PATCH 09/15] net: sundance: Convert tasklet API to new bottom half workqueue mechanism
       [not found] <20240621050525.3720069-1-allen.lkml@gmail.com>
                   ` (7 preceding siblings ...)
  2024-06-21  5:05 ` [PATCH 08/15] net: chelsio: " Allen Pais
@ 2024-06-21  5:05 ` Allen Pais
  2024-06-21  5:05 ` [PATCH 10/15] net: hinic: " Allen Pais
                   ` (5 subsequent siblings)
  14 siblings, 0 replies; 25+ messages in thread
From: Allen Pais @ 2024-06-21  5:05 UTC (permalink / raw)
  To: kuba, Denis Kirjanov, David S. Miller, Eric Dumazet, Paolo Abeni
  Cc: jes, cai.huoqing, dougmill, npiggin, christophe.leroy,
	aneesh.kumar, naveen.n.rao, nnac123, tlfalcon, cooldavid,
	marcin.s.wojtas, mlindner, stephen, nbd, sean.wang, Mark-MC.Lee,
	lorenzo, matthias.bgg, angelogioacchino.delregno, borisp,
	bryan.whitehead, UNGLinuxDriver, louis.peens, richardcochran,
	linux-rdma, linux-kernel, linux-acenic, linux-net-drivers,
	Allen Pais, netdev

Migrate tasklet APIs to the new bottom half workqueue mechanism. It
replaces all occurrences of tasklet usage with the appropriate workqueue
APIs throughout the dlink sundance driver. This transition ensures
compatibility with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 drivers/net/ethernet/dlink/sundance.c | 41 ++++++++++++++-------------
 1 file changed, 21 insertions(+), 20 deletions(-)

diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 8af5ecec7d61..65dfd32a9656 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -86,6 +86,7 @@ static char *media[MAX_UNITS];
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
+#include <linux/workqueue.h>
 #include <linux/init.h>
 #include <linux/bitops.h>
 #include <linux/uaccess.h>
@@ -395,8 +396,8 @@ struct netdev_private {
 	unsigned int an_enable:1;
 	unsigned int speed;
 	unsigned int wol_enabled:1;			/* Wake on LAN enabled */
-	struct tasklet_struct rx_tasklet;
-	struct tasklet_struct tx_tasklet;
+	struct work_struct rx_bh_work;
+	struct work_struct tx_bh_work;
 	int budget;
 	int cur_task;
 	/* Multicast and receive mode. */
@@ -430,8 +431,8 @@ static void init_ring(struct net_device *dev);
 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
 static int reset_tx (struct net_device *dev);
 static irqreturn_t intr_handler(int irq, void *dev_instance);
-static void rx_poll(struct tasklet_struct *t);
-static void tx_poll(struct tasklet_struct *t);
+static void rx_poll(struct work_struct *work);
+static void tx_poll(struct work_struct *work);
 static void refill_rx (struct net_device *dev);
 static void netdev_error(struct net_device *dev, int intr_status);
 static void netdev_error(struct net_device *dev, int intr_status);
@@ -541,8 +542,8 @@ static int sundance_probe1(struct pci_dev *pdev,
 	np->msg_enable = (1 << debug) - 1;
 	spin_lock_init(&np->lock);
 	spin_lock_init(&np->statlock);
-	tasklet_setup(&np->rx_tasklet, rx_poll);
-	tasklet_setup(&np->tx_tasklet, tx_poll);
+	INIT_WORK(&np->rx_bh_work, rx_poll);
+	INIT_WORK(&np->tx_bh_work, tx_poll);
 
 	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
 			&ring_dma, GFP_KERNEL);
@@ -965,7 +966,7 @@ static void tx_timeout(struct net_device *dev, unsigned int txqueue)
 	unsigned long flag;
 
 	netif_stop_queue(dev);
-	tasklet_disable_in_atomic(&np->tx_tasklet);
+	disable_work_sync(&np->tx_bh_work);
 	iowrite16(0, ioaddr + IntrEnable);
 	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
 		   "TxFrameId %2.2x,"
@@ -1006,7 +1007,7 @@ static void tx_timeout(struct net_device *dev, unsigned int txqueue)
 		netif_wake_queue(dev);
 	}
 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
-	tasklet_enable(&np->tx_tasklet);
+	enable_and_queue_work(system_bh_wq, &np->tx_bh_work);
 }
 
 
@@ -1058,9 +1059,9 @@ static void init_ring(struct net_device *dev)
 	}
 }
 
-static void tx_poll(struct tasklet_struct *t)
+static void tx_poll(struct work_struct *work)
 {
-	struct netdev_private *np = from_tasklet(np, t, tx_tasklet);
+	struct netdev_private *np = from_work(np, work, tx_bh_work);
 	unsigned head = np->cur_task % TX_RING_SIZE;
 	struct netdev_desc *txdesc =
 		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
@@ -1104,11 +1105,11 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
 			goto drop_frame;
 	txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag);
 
-	/* Increment cur_tx before tasklet_schedule() */
+	/* Increment cur_tx before bh_work is queued */
 	np->cur_tx++;
 	mb();
-	/* Schedule a tx_poll() task */
-	tasklet_schedule(&np->tx_tasklet);
+	/* Queue a tx_poll() bh work */
+	queue_work(system_bh_wq, &np->tx_bh_work);
 
 	/* On some architectures: explicitly flush cache lines here. */
 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
@@ -1199,7 +1200,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
 					ioaddr + IntrEnable);
 			if (np->budget < 0)
 				np->budget = RX_BUDGET;
-			tasklet_schedule(&np->rx_tasklet);
+			queue_work(system_bh_wq, &np->rx_bh_work);
 		}
 		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
 			tx_status = ioread16 (ioaddr + TxStatus);
@@ -1315,9 +1316,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
 	return IRQ_RETVAL(handled);
 }
 
-static void rx_poll(struct tasklet_struct *t)
+static void rx_poll(struct work_struct *work)
 {
-	struct netdev_private *np = from_tasklet(np, t, rx_tasklet);
+	struct netdev_private *np = from_work(np, work, rx_bh_work);
 	struct net_device *dev = np->ndev;
 	int entry = np->cur_rx % RX_RING_SIZE;
 	int boguscnt = np->budget;
@@ -1407,7 +1408,7 @@ static void rx_poll(struct tasklet_struct *t)
 	np->budget -= received;
 	if (np->budget <= 0)
 		np->budget = RX_BUDGET;
-	tasklet_schedule(&np->rx_tasklet);
+	queue_work(system_bh_wq, &np->rx_bh_work);
 }
 
 static void refill_rx (struct net_device *dev)
@@ -1819,9 +1820,9 @@ static int netdev_close(struct net_device *dev)
 	struct sk_buff *skb;
 	int i;
 
-	/* Wait and kill tasklet */
-	tasklet_kill(&np->rx_tasklet);
-	tasklet_kill(&np->tx_tasklet);
+	/* Wait and cancel bh work */
+	cancel_work_sync(&np->rx_bh_work);
+	cancel_work_sync(&np->tx_bh_work);
 	np->cur_tx = 0;
 	np->dirty_tx = 0;
 	np->cur_task = 0;
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 25+ messages in thread

* [PATCH 10/15] net: hinic: Convert tasklet API to new bottom half workqueue mechanism
       [not found] <20240621050525.3720069-1-allen.lkml@gmail.com>
                   ` (8 preceding siblings ...)
  2024-06-21  5:05 ` [PATCH 09/15] net: sundance: " Allen Pais
@ 2024-06-21  5:05 ` Allen Pais
  2024-06-25 10:47   ` Paolo Abeni
  2024-06-21  5:05 ` [PATCH 11/15] net: ehea: " Allen Pais
                   ` (4 subsequent siblings)
  14 siblings, 1 reply; 25+ messages in thread
From: Allen Pais @ 2024-06-21  5:05 UTC (permalink / raw)
  To: kuba, Cai Huoqing, David S. Miller, Eric Dumazet, Paolo Abeni
  Cc: jes, kda, dougmill, npiggin, christophe.leroy, aneesh.kumar,
	naveen.n.rao, nnac123, tlfalcon, cooldavid, marcin.s.wojtas,
	mlindner, stephen, nbd, sean.wang, Mark-MC.Lee, lorenzo,
	matthias.bgg, angelogioacchino.delregno, borisp, bryan.whitehead,
	UNGLinuxDriver, louis.peens, richardcochran, linux-rdma,
	linux-kernel, linux-acenic, linux-net-drivers, Allen Pais, netdev

Migrate tasklet APIs to the new bottom half workqueue mechanism. It
replaces all occurrences of tasklet usage with the appropriate workqueue
APIs throughout the huawei hinic driver. This transition ensures
compatibility with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 .../net/ethernet/huawei/hinic/hinic_hw_cmdq.c   |  2 +-
 .../net/ethernet/huawei/hinic/hinic_hw_eqs.c    | 17 ++++++++---------
 .../net/ethernet/huawei/hinic/hinic_hw_eqs.h    |  2 +-
 3 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index d39eec9c62bf..f54feae40ef8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -344,7 +344,7 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
 	struct hinic_hw_wqe *hw_wqe;
 	struct completion done;
 
-	/* Keep doorbell index correct. bh - for tasklet(ceq). */
+	/* Keep doorbell index correct. For bh_work(ceq). */
 	spin_lock_bh(&cmdq->cmdq_lock);
 
 	/* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index 045c47786a04..381ced8f3c93 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -368,12 +368,12 @@ static void eq_irq_work(struct work_struct *work)
 }
 
 /**
- * ceq_tasklet - the tasklet of the EQ that received the event
- * @t: the tasklet struct pointer
+ * ceq_bh_work - the bh_work of the EQ that received the event
+ * @work: the work struct pointer
  **/
-static void ceq_tasklet(struct tasklet_struct *t)
+static void ceq_bh_work(struct work_struct *work)
 {
-	struct hinic_eq *ceq = from_tasklet(ceq, t, ceq_tasklet);
+	struct hinic_eq *ceq = from_work(ceq, work, ceq_bh_work);
 
 	eq_irq_handler(ceq);
 }
@@ -413,7 +413,7 @@ static irqreturn_t ceq_interrupt(int irq, void *data)
 	/* clear resend timer cnt register */
 	hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry);
 
-	tasklet_schedule(&ceq->ceq_tasklet);
+	queue_work(system_bh_wq, &ceq->ceq_bh_work);
 
 	return IRQ_HANDLED;
 }
@@ -782,7 +782,7 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
 
 		INIT_WORK(&aeq_work->work, eq_irq_work);
 	} else if (type == HINIC_CEQ) {
-		tasklet_setup(&eq->ceq_tasklet, ceq_tasklet);
+		INIT_WORK(&eq->ceq_bh_work, ceq_bh_work);
 	}
 
 	/* set the attributes of the msix entry */
@@ -833,7 +833,7 @@ static void remove_eq(struct hinic_eq *eq)
 		hinic_hwif_write_reg(eq->hwif,
 				     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
 	} else if (eq->type == HINIC_CEQ) {
-		tasklet_kill(&eq->ceq_tasklet);
+		cancel_work_sync(&eq->ceq_bh_work);
 		/* clear ceq_len to avoid hw access host memory */
 		hinic_hwif_write_reg(eq->hwif,
 				     HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0);
@@ -968,9 +968,8 @@ void hinic_dump_ceq_info(struct hinic_hwdev *hwdev)
 		ci = hinic_hwif_read_reg(hwdev->hwif, addr);
 		addr = EQ_PROD_IDX_REG_ADDR(eq);
 		pi = hinic_hwif_read_reg(hwdev->hwif, addr);
-		dev_err(&hwdev->hwif->pdev->dev, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %d, ceqe: 0x%x\n",
+		dev_err(&hwdev->hwif->pdev->dev, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, wrap: %d, ceqe: 0x%x\n",
 			q_id, ci, eq->cons_idx, pi,
-			eq->ceq_tasklet.state,
 			eq->wrapped, be32_to_cpu(*(__be32 *)(GET_CURR_CEQ_ELEM(eq))));
 	}
 }
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
index 2f3222174fc7..8fed3155f15c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
@@ -193,7 +193,7 @@ struct hinic_eq {
 
 	struct hinic_eq_work    aeq_work;
 
-	struct tasklet_struct   ceq_tasklet;
+	struct work_struct	ceq_bh_work;
 };
 
 struct hinic_hw_event_cb {
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 25+ messages in thread

* [PATCH 11/15] net: ehea: Convert tasklet API to new bottom half workqueue mechanism
       [not found] <20240621050525.3720069-1-allen.lkml@gmail.com>
                   ` (9 preceding siblings ...)
  2024-06-21  5:05 ` [PATCH 10/15] net: hinic: " Allen Pais
@ 2024-06-21  5:05 ` Allen Pais
  2024-06-21  5:05 ` [PATCH 12/15] net: ibmvnic: " Allen Pais
                   ` (3 subsequent siblings)
  14 siblings, 0 replies; 25+ messages in thread
From: Allen Pais @ 2024-06-21  5:05 UTC (permalink / raw)
  To: kuba, David S. Miller, Eric Dumazet, Paolo Abeni
  Cc: jes, kda, cai.huoqing, dougmill, npiggin, christophe.leroy,
	aneesh.kumar, naveen.n.rao, nnac123, tlfalcon, cooldavid,
	marcin.s.wojtas, mlindner, stephen, nbd, sean.wang, Mark-MC.Lee,
	lorenzo, matthias.bgg, angelogioacchino.delregno, borisp,
	bryan.whitehead, UNGLinuxDriver, louis.peens, richardcochran,
	linux-rdma, linux-kernel, linux-acenic, linux-net-drivers,
	Allen Pais, netdev

Migrate tasklet APIs to the new bottom half workqueue mechanism. It
replaces all occurrences of tasklet usage with the appropriate workqueue
APIs throughout the ehea driver. This transition ensures compatibility
with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 drivers/net/ethernet/ibm/ehea/ehea.h      |  3 ++-
 drivers/net/ethernet/ibm/ehea/ehea_main.c | 14 +++++++-------
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index 208c440a602b..c1e7e22884fa 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -19,6 +19,7 @@
 #include <linux/ethtool.h>
 #include <linux/vmalloc.h>
 #include <linux/if_vlan.h>
+#include <linux/workqueue.h>
 #include <linux/platform_device.h>
 
 #include <asm/ibmebus.h>
@@ -381,7 +382,7 @@ struct ehea_adapter {
 	struct platform_device *ofdev;
 	struct ehea_port *port[EHEA_MAX_PORTS];
 	struct ehea_eq *neq;       /* notification event queue */
-	struct tasklet_struct neq_tasklet;
+	struct work_struct neq_bh_work;
 	struct ehea_mr mr;
 	u32 pd;                    /* protection domain */
 	u64 max_mc_mac;            /* max number of multicast mac addresses */
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 1e29e5c9a2df..6960d06805f6 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -976,7 +976,7 @@ int ehea_sense_port_attr(struct ehea_port *port)
 	u64 hret;
 	struct hcp_ehea_port_cb0 *cb0;
 
-	/* may be called via ehea_neq_tasklet() */
+	/* may be called via ehea_neq_bh_work() */
 	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
 	if (!cb0) {
 		pr_err("no mem for cb0\n");
@@ -1216,9 +1216,9 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
 	}
 }
 
-static void ehea_neq_tasklet(struct tasklet_struct *t)
+static void ehea_neq_bh_work(struct work_struct *work)
 {
-	struct ehea_adapter *adapter = from_tasklet(adapter, t, neq_tasklet);
+	struct ehea_adapter *adapter = from_work(adapter, work, neq_bh_work);
 	struct ehea_eqe *eqe;
 	u64 event_mask;
 
@@ -1243,7 +1243,7 @@ static void ehea_neq_tasklet(struct tasklet_struct *t)
 static irqreturn_t ehea_interrupt_neq(int irq, void *param)
 {
 	struct ehea_adapter *adapter = param;
-	tasklet_hi_schedule(&adapter->neq_tasklet);
+	queue_work(system_bh_highpri_wq, &adapter->neq_bh_work);
 	return IRQ_HANDLED;
 }
 
@@ -3423,7 +3423,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
 		goto out_free_ad;
 	}
 
-	tasklet_setup(&adapter->neq_tasklet, ehea_neq_tasklet);
+	INIT_WORK(&adapter->neq_bh_work, ehea_neq_bh_work);
 
 	ret = ehea_create_device_sysfs(dev);
 	if (ret)
@@ -3444,7 +3444,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
 	}
 
 	/* Handle any events that might be pending. */
-	tasklet_hi_schedule(&adapter->neq_tasklet);
+	queue_work(system_bh_highpri_wq, &adapter->neq_bh_work);
 
 	ret = 0;
 	goto out;
@@ -3485,7 +3485,7 @@ static void ehea_remove(struct platform_device *dev)
 	ehea_remove_device_sysfs(dev);
 
 	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
-	tasklet_kill(&adapter->neq_tasklet);
+	cancel_work_sync(&adapter->neq_bh_work);
 
 	ehea_destroy_eq(adapter->neq);
 	ehea_remove_adapter_mr(adapter);
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 25+ messages in thread

* [PATCH 12/15] net: ibmvnic: Convert tasklet API to new bottom half workqueue mechanism
       [not found] <20240621050525.3720069-1-allen.lkml@gmail.com>
                   ` (10 preceding siblings ...)
  2024-06-21  5:05 ` [PATCH 11/15] net: ehea: " Allen Pais
@ 2024-06-21  5:05 ` Allen Pais
  2024-06-21  5:05 ` [PATCH 13/15] net: jme: " Allen Pais
                   ` (2 subsequent siblings)
  14 siblings, 0 replies; 25+ messages in thread
From: Allen Pais @ 2024-06-21  5:05 UTC (permalink / raw)
  To: kuba, Michael Ellerman, Nicholas Piggin, Christophe Leroy,
	Naveen N. Rao, Haren Myneni, Rick Lindsley, Nick Child,
	Thomas Falcon, David S. Miller, Eric Dumazet, Paolo Abeni
  Cc: jes, kda, cai.huoqing, dougmill, aneesh.kumar, cooldavid,
	marcin.s.wojtas, mlindner, stephen, nbd, sean.wang, Mark-MC.Lee,
	lorenzo, matthias.bgg, angelogioacchino.delregno, borisp,
	bryan.whitehead, UNGLinuxDriver, louis.peens, richardcochran,
	linux-rdma, linux-kernel, linux-acenic, linux-net-drivers,
	Allen Pais, linuxppc-dev, netdev

Migrate tasklet APIs to the new bottom half workqueue mechanism. It
replaces all occurrences of tasklet usage with the appropriate workqueue
APIs throughout the ibmvnic driver. This transition ensures compatibility
with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 drivers/net/ethernet/ibm/ibmvnic.c | 24 ++++++++++++------------
 drivers/net/ethernet/ibm/ibmvnic.h |  2 +-
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5e9a93bdb518..2e817a560c3a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2725,7 +2725,7 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
 /*
  * Initialize the init_done completion and return code values. We
  * can get a transport event just after registering the CRQ and the
- * tasklet will use this to communicate the transport event. To ensure
+ * bh work will use this to communicate the transport event. To ensure
  * we don't miss the notification/error, initialize these _before_
  * regisering the CRQ.
  */
@@ -4429,7 +4429,7 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
 	int cap_reqs;
 
 	/* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
-	 * the PROMISC flag). Initialize this count upfront. When the tasklet
+	 * the PROMISC flag). Initialize this count upfront. When the bh work
 	 * receives a response to all of these, it will send the next protocol
 	 * message (QUERY_IP_OFFLOAD).
 	 */
@@ -4965,7 +4965,7 @@ static void send_query_cap(struct ibmvnic_adapter *adapter)
 	int cap_reqs;
 
 	/* We send out 25 QUERY_CAPABILITY CRQs below.  Initialize this count
-	 * upfront. When the tasklet receives a response to all of these, it
+	 * upfront. When the bh work receives a response to all of these, it
 	 * can send out the next protocol messaage (REQUEST_CAPABILITY).
 	 */
 	cap_reqs = 25;
@@ -5477,7 +5477,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
 	int i;
 
 	/* CHECK: Test/set of login_pending does not need to be atomic
-	 * because only ibmvnic_tasklet tests/clears this.
+	 * because only ibmvnic_bh_work tests/clears this.
 	 */
 	if (!adapter->login_pending) {
 		netdev_warn(netdev, "Ignoring unexpected login response\n");
@@ -6063,13 +6063,13 @@ static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
 {
 	struct ibmvnic_adapter *adapter = instance;
 
-	tasklet_schedule(&adapter->tasklet);
+	queue_work(system_bh_wq, &adapter->bh_work);
 	return IRQ_HANDLED;
 }
 
-static void ibmvnic_tasklet(struct tasklet_struct *t)
+static void ibmvnic_bh_work(struct work_struct *work)
 {
-	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
+	struct ibmvnic_adapter *adapter = from_work(adapter, work, bh_work);
 	struct ibmvnic_crq_queue *queue = &adapter->crq;
 	union ibmvnic_crq *crq;
 	unsigned long flags;
@@ -6150,7 +6150,7 @@ static void release_crq_queue(struct ibmvnic_adapter *adapter)
 
 	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
 	free_irq(vdev->irq, adapter);
-	tasklet_kill(&adapter->tasklet);
+	cancel_work_sync(&adapter->bh_work);
 	do {
 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
@@ -6201,7 +6201,7 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter)
 
 	retrc = 0;
 
-	tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);
+	INIT_WORK(&adapter->bh_work, (void *)ibmvnic_bh_work);
 
 	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
 	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
@@ -6223,12 +6223,12 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter)
 	spin_lock_init(&crq->lock);
 
 	/* process any CRQs that were queued before we enabled interrupts */
-	tasklet_schedule(&adapter->tasklet);
+	queue_work(system_bh_wq, &adapter->bh_work);
 
 	return retrc;
 
 req_irq_failed:
-	tasklet_kill(&adapter->tasklet);
+	cancel_work_sync(&adapter->bh_work);
 	do {
 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
@@ -6621,7 +6621,7 @@ static int ibmvnic_resume(struct device *dev)
 	if (adapter->state != VNIC_OPEN)
 		return 0;
 
-	tasklet_schedule(&adapter->tasklet);
+	queue_work(system_bh_wq, &adapter->bh_work);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 94ac36b1408b..b65b210a8059 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1036,7 +1036,7 @@ struct ibmvnic_adapter {
 	u32 cur_rx_buf_sz;
 	u32 prev_rx_buf_sz;
 
-	struct tasklet_struct tasklet;
+	struct work_struct bh_work;
 	enum vnic_state state;
 	/* Used for serialization of state field. When taking both state
 	 * and rwi locks, take state lock first.
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 25+ messages in thread

* [PATCH 13/15] net: jme: Convert tasklet API to new bottom half workqueue mechanism
       [not found] <20240621050525.3720069-1-allen.lkml@gmail.com>
                   ` (11 preceding siblings ...)
  2024-06-21  5:05 ` [PATCH 12/15] net: ibmvnic: " Allen Pais
@ 2024-06-21  5:05 ` Allen Pais
  2024-06-25 10:38   ` Paolo Abeni
  2024-06-21  5:05 ` [PATCH 14/15] net: marvell: " Allen Pais
  2024-06-21  5:05 ` [PATCH 15/15] net: mtk-wed: " Allen Pais
  14 siblings, 1 reply; 25+ messages in thread
From: Allen Pais @ 2024-06-21  5:05 UTC (permalink / raw)
  To: kuba, Guo-Fu Tseng, David S. Miller, Eric Dumazet, Paolo Abeni
  Cc: jes, kda, cai.huoqing, dougmill, npiggin, christophe.leroy,
	aneesh.kumar, naveen.n.rao, nnac123, tlfalcon, marcin.s.wojtas,
	mlindner, stephen, nbd, sean.wang, Mark-MC.Lee, lorenzo,
	matthias.bgg, angelogioacchino.delregno, borisp, bryan.whitehead,
	UNGLinuxDriver, louis.peens, richardcochran, linux-rdma,
	linux-kernel, linux-acenic, linux-net-drivers, Allen Pais, netdev

Migrate tasklet APIs to the new bottom half workqueue mechanism. It
replaces all occurrences of tasklet usage with the appropriate workqueue
APIs throughout the jme driver. This transition ensures compatibility
with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 drivers/net/ethernet/jme.c | 72 +++++++++++++++++++-------------------
 drivers/net/ethernet/jme.h |  8 ++---
 2 files changed, 40 insertions(+), 40 deletions(-)

diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b06e24562973..b1a92b851b3b 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1141,7 +1141,7 @@ jme_dynamic_pcc(struct jme_adapter *jme)
 
 	if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
 		if (dpi->attempt < dpi->cur)
-			tasklet_schedule(&jme->rxclean_task);
+			queue_work(system_bh_wq, &jme->rxclean_bh_work);
 		jme_set_rx_pcc(jme, dpi->attempt);
 		dpi->cur = dpi->attempt;
 		dpi->cnt = 0;
@@ -1182,9 +1182,9 @@ jme_shutdown_nic(struct jme_adapter *jme)
 }
 
 static void
-jme_pcc_tasklet(struct tasklet_struct *t)
+jme_pcc_bh_work(struct work_struct *work)
 {
-	struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
+	struct jme_adapter *jme = from_work(jme, work, pcc_bh_work);
 	struct net_device *netdev = jme->dev;
 
 	if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
@@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
 		jme_stop_shutdown_timer(jme);
 
 	jme_stop_pcc_timer(jme);
-	tasklet_disable(&jme->txclean_task);
-	tasklet_disable(&jme->rxclean_task);
-	tasklet_disable(&jme->rxempty_task);
+	disable_work_sync(&jme->txclean_bh_work);
+	disable_work_sync(&jme->rxclean_bh_work);
+	disable_work_sync(&jme->rxempty_bh_work);
 
 	if (netif_carrier_ok(netdev)) {
 		jme_disable_rx_engine(jme);
@@ -1304,7 +1304,7 @@ static void jme_link_change_work(struct work_struct *work)
 		rc = jme_setup_rx_resources(jme);
 		if (rc) {
 			pr_err("Allocating resources for RX error, Device STOPPED!\n");
-			goto out_enable_tasklet;
+			goto out_enable_bh_work;
 		}
 
 		rc = jme_setup_tx_resources(jme);
@@ -1326,22 +1326,22 @@ static void jme_link_change_work(struct work_struct *work)
 		jme_start_shutdown_timer(jme);
 	}
 
-	goto out_enable_tasklet;
+	goto out_enable_bh_work;
 
 err_out_free_rx_resources:
 	jme_free_rx_resources(jme);
-out_enable_tasklet:
-	tasklet_enable(&jme->txclean_task);
-	tasklet_enable(&jme->rxclean_task);
-	tasklet_enable(&jme->rxempty_task);
+out_enable_bh_work:
+	enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
+	enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
+	enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
 out:
 	atomic_inc(&jme->link_changing);
 }
 
 static void
-jme_rx_clean_tasklet(struct tasklet_struct *t)
+jme_rx_clean_bh_work(struct work_struct *work)
 {
-	struct jme_adapter *jme = from_tasklet(jme, t, rxclean_task);
+	struct jme_adapter *jme = from_work(jme, work, rxclean_bh_work);
 	struct dynpcc_info *dpi = &(jme->dpi);
 
 	jme_process_receive(jme, jme->rx_ring_size);
@@ -1374,9 +1374,9 @@ jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
 }
 
 static void
-jme_rx_empty_tasklet(struct tasklet_struct *t)
+jme_rx_empty_bh_work(struct work_struct *work)
 {
-	struct jme_adapter *jme = from_tasklet(jme, t, rxempty_task);
+	struct jme_adapter *jme = from_work(jme, work, rxempty_bh_work);
 
 	if (unlikely(atomic_read(&jme->link_changing) != 1))
 		return;
@@ -1386,7 +1386,7 @@ jme_rx_empty_tasklet(struct tasklet_struct *t)
 
 	netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
 
-	jme_rx_clean_tasklet(&jme->rxclean_task);
+	jme_rx_clean_bh_work(&jme->rxclean_bh_work);
 
 	while (atomic_read(&jme->rx_empty) > 0) {
 		atomic_dec(&jme->rx_empty);
@@ -1410,9 +1410,9 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
 
 }
 
-static void jme_tx_clean_tasklet(struct tasklet_struct *t)
+static void jme_tx_clean_bh_work(struct work_struct *work)
 {
-	struct jme_adapter *jme = from_tasklet(jme, t, txclean_task);
+	struct jme_adapter *jme = from_work(jme, work, txclean_bh_work);
 	struct jme_ring *txring = &(jme->txring[0]);
 	struct txdesc *txdesc = txring->desc;
 	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
@@ -1510,12 +1510,12 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
 
 	if (intrstat & INTR_TMINTR) {
 		jwrite32(jme, JME_IEVE, INTR_TMINTR);
-		tasklet_schedule(&jme->pcc_task);
+		queue_work(system_bh_wq, &jme->pcc_bh_work);
 	}
 
 	if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
 		jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
-		tasklet_schedule(&jme->txclean_task);
+		queue_work(system_bh_wq, &jme->txclean_bh_work);
 	}
 
 	if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
@@ -1538,9 +1538,9 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
 	} else {
 		if (intrstat & INTR_RX0EMP) {
 			atomic_inc(&jme->rx_empty);
-			tasklet_hi_schedule(&jme->rxempty_task);
+			queue_work(system_bh_highpri_wq, &jme->rxempty_bh_work);
 		} else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
-			tasklet_hi_schedule(&jme->rxclean_task);
+			queue_work(system_bh_highpri_wq, &jme->rxclean_bh_work);
 		}
 	}
 
@@ -1826,9 +1826,9 @@ jme_open(struct net_device *netdev)
 	jme_clear_pm_disable_wol(jme);
 	JME_NAPI_ENABLE(jme);
 
-	tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
-	tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
-	tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
+	INIT_WORK(&jme->txclean_bh_work, jme_tx_clean_bh_work);
+	INIT_WORK(&jme->rxclean_bh_work, jme_rx_clean_bh_work);
+	INIT_WORK(&jme->rxempty_bh_work, jme_rx_empty_bh_work);
 
 	rc = jme_request_irq(jme);
 	if (rc)
@@ -1914,9 +1914,9 @@ jme_close(struct net_device *netdev)
 	JME_NAPI_DISABLE(jme);
 
 	cancel_work_sync(&jme->linkch_task);
-	tasklet_kill(&jme->txclean_task);
-	tasklet_kill(&jme->rxclean_task);
-	tasklet_kill(&jme->rxempty_task);
+	cancel_work_sync(&jme->txclean_bh_work);
+	cancel_work_sync(&jme->rxclean_bh_work);
+	cancel_work_sync(&jme->rxempty_bh_work);
 
 	jme_disable_rx_engine(jme);
 	jme_disable_tx_engine(jme);
@@ -3020,7 +3020,7 @@ jme_init_one(struct pci_dev *pdev,
 	atomic_set(&jme->tx_cleaning, 1);
 	atomic_set(&jme->rx_empty, 1);
 
-	tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
+	INIT_WORK(&jme->pcc_bh_work, jme_pcc_bh_work);
 	INIT_WORK(&jme->linkch_task, jme_link_change_work);
 	jme->dpi.cur = PCC_P1;
 
@@ -3180,9 +3180,9 @@ jme_suspend(struct device *dev)
 	netif_stop_queue(netdev);
 	jme_stop_irq(jme);
 
-	tasklet_disable(&jme->txclean_task);
-	tasklet_disable(&jme->rxclean_task);
-	tasklet_disable(&jme->rxempty_task);
+	disable_work_sync(&jme->txclean_bh_work);
+	disable_work_sync(&jme->rxclean_bh_work);
+	disable_work_sync(&jme->rxempty_bh_work);
 
 	if (netif_carrier_ok(netdev)) {
 		if (test_bit(JME_FLAG_POLL, &jme->flags))
@@ -3198,9 +3198,9 @@ jme_suspend(struct device *dev)
 		jme->phylink = 0;
 	}
 
-	tasklet_enable(&jme->txclean_task);
-	tasklet_enable(&jme->rxclean_task);
-	tasklet_enable(&jme->rxempty_task);
+	enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
+	enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
+	enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
 
 	jme_powersave_phy(jme);
 
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 860494ff3714..73a8a1438340 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -406,11 +406,11 @@ struct jme_adapter {
 	spinlock_t		phy_lock;
 	spinlock_t		macaddr_lock;
 	spinlock_t		rxmcs_lock;
-	struct tasklet_struct	rxempty_task;
-	struct tasklet_struct	rxclean_task;
-	struct tasklet_struct	txclean_task;
+	struct work_struct	rxempty_bh_work;
+	struct work_struct	rxclean_bh_work;
+	struct work_struct	txclean_bh_work;
 	struct work_struct	linkch_task;
-	struct tasklet_struct	pcc_task;
+	struct work_struct	pcc_bh_work;
 	unsigned long		flags;
 	u32			reg_txcs;
 	u32			reg_txpfc;
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 25+ messages in thread

* [PATCH 14/15] net: marvell: Convert tasklet API to new bottom half workqueue mechanism
       [not found] <20240621050525.3720069-1-allen.lkml@gmail.com>
                   ` (12 preceding siblings ...)
  2024-06-21  5:05 ` [PATCH 13/15] net: jme: " Allen Pais
@ 2024-06-21  5:05 ` Allen Pais
  2024-06-21  5:05 ` [PATCH 15/15] net: mtk-wed: " Allen Pais
  14 siblings, 0 replies; 25+ messages in thread
From: Allen Pais @ 2024-06-21  5:05 UTC (permalink / raw)
  To: kuba, Marcin Wojtas, Russell King, David S. Miller, Eric Dumazet,
	Paolo Abeni, Mirko Lindner, Stephen Hemminger
  Cc: jes, kda, cai.huoqing, dougmill, npiggin, christophe.leroy,
	aneesh.kumar, naveen.n.rao, nnac123, tlfalcon, cooldavid, nbd,
	sean.wang, Mark-MC.Lee, lorenzo, matthias.bgg,
	angelogioacchino.delregno, borisp, bryan.whitehead,
	UNGLinuxDriver, louis.peens, richardcochran, linux-rdma,
	linux-kernel, linux-acenic, linux-net-drivers, Allen Pais, netdev

Migrate tasklet APIs to the new bottom half workqueue mechanism. It
replaces all occurrences of tasklet usage with the appropriate workqueue
APIs throughout the marvell driver. This transition ensures compatibility
with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c |  4 +---
 drivers/net/ethernet/marvell/skge.c             | 12 ++++++------
 drivers/net/ethernet/marvell/skge.h             |  3 ++-
 3 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 671368d2c77e..47fe71a8f57e 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2628,9 +2628,7 @@ static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
  * The number of sent descriptors is returned.
  * Per-thread access
  *
- * Called only from mvpp2_txq_done(), called from mvpp2_tx()
- * (migration disabled) and from the TX completion tasklet (migration
- * disabled) so using smp_processor_id() is OK.
+ * Called only from mvpp2_txq_done().
  */
 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
 					   struct mvpp2_tx_queue *txq)
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index fcfb34561882..4448af079447 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3342,13 +3342,13 @@ static void skge_error_irq(struct skge_hw *hw)
 }
 
 /*
- * Interrupt from PHY are handled in tasklet (softirq)
+ * Interrupt from PHY are handled in bh work (softirq)
  * because accessing phy registers requires spin wait which might
  * cause excess interrupt latency.
  */
-static void skge_extirq(struct tasklet_struct *t)
+static void skge_extirq(struct work_struct *work)
 {
-	struct skge_hw *hw = from_tasklet(hw, t, phy_task);
+	struct skge_hw *hw = from_work(hw, work, phy_bh_work);
 	int port;
 
 	for (port = 0; port < hw->ports; port++) {
@@ -3389,7 +3389,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
 	status &= hw->intr_mask;
 	if (status & IS_EXT_REG) {
 		hw->intr_mask &= ~IS_EXT_REG;
-		tasklet_schedule(&hw->phy_task);
+		queue_work(system_bh_wq, &hw->phy_bh_work);
 	}
 
 	if (status & (IS_XA1_F|IS_R1_F)) {
@@ -3937,7 +3937,7 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	hw->pdev = pdev;
 	spin_lock_init(&hw->hw_lock);
 	spin_lock_init(&hw->phy_lock);
-	tasklet_setup(&hw->phy_task, skge_extirq);
+	INIT_WORK(&hw->phy_bh_work, skge_extirq);
 
 	hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
 	if (!hw->regs) {
@@ -4035,7 +4035,7 @@ static void skge_remove(struct pci_dev *pdev)
 	dev0 = hw->dev[0];
 	unregister_netdev(dev0);
 
-	tasklet_kill(&hw->phy_task);
+	cancel_work_sync(&hw->phy_bh_work);
 
 	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask = 0;
diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h
index f72217348eb4..0cf77f4b1c57 100644
--- a/drivers/net/ethernet/marvell/skge.h
+++ b/drivers/net/ethernet/marvell/skge.h
@@ -5,6 +5,7 @@
 #ifndef _SKGE_H
 #define _SKGE_H
 #include <linux/interrupt.h>
+#include <linux/workqueue.h>
 
 /* PCI config registers */
 #define PCI_DEV_REG1	0x40
@@ -2418,7 +2419,7 @@ struct skge_hw {
 	u32	     	     ram_offset;
 	u16		     phy_addr;
 	spinlock_t	     phy_lock;
-	struct tasklet_struct phy_task;
+	struct work_struct   phy_bh_work;
 
 	char		     irq_name[]; /* skge@pci:000:04:00.0 */
 };
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 25+ messages in thread

* [PATCH 15/15] net: mtk-wed: Convert tasklet API to new bottom half workqueue mechanism
       [not found] <20240621050525.3720069-1-allen.lkml@gmail.com>
                   ` (13 preceding siblings ...)
  2024-06-21  5:05 ` [PATCH 14/15] net: marvell: " Allen Pais
@ 2024-06-21  5:05 ` Allen Pais
  14 siblings, 0 replies; 25+ messages in thread
From: Allen Pais @ 2024-06-21  5:05 UTC (permalink / raw)
  To: kuba, Felix Fietkau, Sean Wang, Mark Lee, Lorenzo Bianconi,
	David S. Miller, Eric Dumazet, Paolo Abeni, Matthias Brugger,
	AngeloGioacchino Del Regno
  Cc: jes, kda, cai.huoqing, dougmill, npiggin, christophe.leroy,
	aneesh.kumar, naveen.n.rao, nnac123, tlfalcon, cooldavid,
	marcin.s.wojtas, mlindner, stephen, borisp, bryan.whitehead,
	UNGLinuxDriver, louis.peens, richardcochran, linux-rdma,
	linux-kernel, linux-acenic, linux-net-drivers, Allen Pais, netdev,
	linux-arm-kernel, linux-mediatek

Migrate tasklet APIs to the new bottom half workqueue mechanism. It
replaces all occurrences of tasklet usage with the appropriate workqueue
APIs throughout the mtk-wed driver. This transition ensures compatibility
with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 drivers/net/ethernet/mediatek/mtk_wed_wo.c | 12 ++++++------
 drivers/net/ethernet/mediatek/mtk_wed_wo.h |  3 ++-
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index 7063c78bd35f..acca9ec67fcf 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -71,7 +71,7 @@ static void
 mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
 {
 	mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
-	tasklet_schedule(&wo->mmio.irq_tasklet);
+	queue_work(system_bh_wq, &wo->mmio.irq_bh_work);
 }
 
 static void
@@ -227,14 +227,14 @@ mtk_wed_wo_irq_handler(int irq, void *data)
 	struct mtk_wed_wo *wo = data;
 
 	mtk_wed_wo_set_isr(wo, 0);
-	tasklet_schedule(&wo->mmio.irq_tasklet);
+	queue_work(system_bh_wq, &wo->mmio.irq_bh_work);
 
 	return IRQ_HANDLED;
 }
 
-static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
+static void mtk_wed_wo_irq_bh_work(struct work_struct *work)
 {
-	struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
+	struct mtk_wed_wo *wo = from_work(wo, work, mmio.irq_bh_work);
 	u32 intr, mask;
 
 	/* disable interrupts */
@@ -395,7 +395,7 @@ mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
 	wo->mmio.irq = irq_of_parse_and_map(np, 0);
 	wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
 	spin_lock_init(&wo->mmio.lock);
-	tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
+	INIT_WORK(&wo->mmio.irq_bh_work, mtk_wed_wo_irq_bh_work);
 
 	ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
 			       mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
@@ -449,7 +449,7 @@ mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
 	/* disable interrupts */
 	mtk_wed_wo_set_isr(wo, 0);
 
-	tasklet_disable(&wo->mmio.irq_tasklet);
+	disable_work_sync(&wo->mmio.irq_bh_work);
 
 	disable_irq(wo->mmio.irq);
 	devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
index 87a67fa3868d..50d619fa213a 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -6,6 +6,7 @@
 
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
+#include <linux/workqueue.h>
 
 struct mtk_wed_hw;
 
@@ -247,7 +248,7 @@ struct mtk_wed_wo {
 		struct regmap *regs;
 
 		spinlock_t lock;
-		struct tasklet_struct irq_tasklet;
+		struct work_struct irq_bh_work;
 		int irq;
 		u32 irq_mask;
 	} mmio;
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 25+ messages in thread

* RE:  [PATCH 05/15] net: cavium/liquidio: Convert tasklet API to new bottom half workqueue mechanism
  2024-06-21  5:05 ` [PATCH 05/15] net: cavium/liquidio: " Allen Pais
@ 2024-06-21 11:45   ` Sunil Kovvuri Goutham
  0 siblings, 0 replies; 25+ messages in thread
From: Sunil Kovvuri Goutham @ 2024-06-21 11:45 UTC (permalink / raw)
  To: Allen Pais, kuba@kernel.org, David S. Miller, Eric Dumazet,
	Paolo Abeni
  Cc: jes@trained-monkey.org, kda@linux-powerpc.org,
	cai.huoqing@linux.dev, dougmill@linux.ibm.com, npiggin@gmail.com,
	christophe.leroy@csgroup.eu, aneesh.kumar@kernel.org,
	naveen.n.rao@linux.ibm.com, nnac123@linux.ibm.com,
	tlfalcon@linux.ibm.com, cooldavid@cooldavid.org,
	marcin.s.wojtas@gmail.com, Mirko Lindner,
	stephen@networkplumber.org, nbd@nbd.name, sean.wang@mediatek.com,
	Mark-MC.Lee@mediatek.com, lorenzo@kernel.org,
	matthias.bgg@gmail.com, angelogioacchino.delregno@collabora.com,
	borisp@nvidia.com, bryan.whitehead@microchip.com,
	UNGLinuxDriver@microchip.com, louis.peens@corigine.com,
	richardcochran@gmail.com, linux-rdma@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-acenic@sunsite.dk,
	linux-net-drivers@amd.com, netdev@vger.kernel.org


>Migrate tasklet APIs to the new bottom half workqueue mechanism. It replaces all
>occurrences of tasklet usage with the appropriate workqueue APIs throughout the
>cavium/liquidio driver. This transition ensures compatibility with the latest design
>and enhances performance.
>
>Signed-off-by: Allen Pais <allen.lkml@gmail.com>
>---
> .../net/ethernet/cavium/liquidio/lio_core.c   |  4 ++--
> .../net/ethernet/cavium/liquidio/lio_main.c   | 24 +++++++++----------
> .../ethernet/cavium/liquidio/lio_vf_main.c    | 10 ++++----
> .../ethernet/cavium/liquidio/octeon_droq.c    |  4 ++--
> .../ethernet/cavium/liquidio/octeon_main.h    |  4 ++--
> 5 files changed, 23 insertions(+), 23 deletions(-)
>

LGTM
Reviewed-by: Sunil Goutham <sgoutham@marvell.com>

^ permalink raw reply	[flat|nested] 25+ messages in thread

* RE: [PATCH 07/15] net: thunderx: Convert tasklet API to new bottom half workqueue mechanism
  2024-06-21  5:05 ` [PATCH 07/15] net: thunderx: " Allen Pais
@ 2024-06-21 11:49   ` Sunil Kovvuri Goutham
  0 siblings, 0 replies; 25+ messages in thread
From: Sunil Kovvuri Goutham @ 2024-06-21 11:49 UTC (permalink / raw)
  To: Allen Pais, kuba@kernel.org, David S. Miller, Eric Dumazet,
	Paolo Abeni
  Cc: jes@trained-monkey.org, kda@linux-powerpc.org,
	cai.huoqing@linux.dev, dougmill@linux.ibm.com, npiggin@gmail.com,
	christophe.leroy@csgroup.eu, aneesh.kumar@kernel.org,
	naveen.n.rao@linux.ibm.com, nnac123@linux.ibm.com,
	tlfalcon@linux.ibm.com, cooldavid@cooldavid.org,
	marcin.s.wojtas@gmail.com, Mirko Lindner,
	stephen@networkplumber.org, nbd@nbd.name, sean.wang@mediatek.com,
	Mark-MC.Lee@mediatek.com, lorenzo@kernel.org,
	matthias.bgg@gmail.com, angelogioacchino.delregno@collabora.com,
	borisp@nvidia.com, bryan.whitehead@microchip.com,
	UNGLinuxDriver@microchip.com, louis.peens@corigine.com,
	richardcochran@gmail.com, linux-rdma@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-acenic@sunsite.dk,
	linux-net-drivers@amd.com, linux-arm-kernel@lists.infradead.org,
	netdev@vger.kernel.org

>Migrate tasklet APIs to the new bottom half workqueue mechanism. It replaces all
>occurrences of tasklet usage with the appropriate workqueue APIs throughout the
>cavium/thunderx driver. This transition ensures compatibility with the latest design
>and enhances performance.
>
>Signed-off-by: Allen Pais <allen.lkml@gmail.com>
>---
> drivers/net/ethernet/cavium/thunder/nic.h     |  5 ++--
> .../net/ethernet/cavium/thunder/nicvf_main.c  | 24 +++++++++----------
> .../ethernet/cavium/thunder/nicvf_queues.c    |  4 ++--
> .../ethernet/cavium/thunder/nicvf_queues.h    |  2 +-
> 4 files changed, 18 insertions(+), 17 deletions(-)
>

LGTM
Reviewed-by: Sunil Goutham <sgoutham@marvell.com>

^ permalink raw reply	[flat|nested] 25+ messages in thread

* [PATCH 13/15] net: jme: Convert tasklet API to new bottom half workqueue mechanism
  2024-06-21 18:39 [PATCH 00/15] ethernet: Convert from tasklet to BH workqueue Allen Pais
@ 2024-06-21 18:39 ` Allen Pais
  0 siblings, 0 replies; 25+ messages in thread
From: Allen Pais @ 2024-06-21 18:39 UTC (permalink / raw)
  To: netdev; +Cc: Allen Pais

Migrate tasklet APIs to the new bottom half workqueue mechanism. It
replaces all occurrences of tasklet usage with the appropriate workqueue
APIs throughout the jme driver. This transition ensures compatibility
with the latest design and enhances performance.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 drivers/net/ethernet/jme.c | 72 +++++++++++++++++++-------------------
 drivers/net/ethernet/jme.h |  8 ++---
 2 files changed, 40 insertions(+), 40 deletions(-)

diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b06e24562973..b1a92b851b3b 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1141,7 +1141,7 @@ jme_dynamic_pcc(struct jme_adapter *jme)
 
 	if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
 		if (dpi->attempt < dpi->cur)
-			tasklet_schedule(&jme->rxclean_task);
+			queue_work(system_bh_wq, &jme->rxclean_bh_work);
 		jme_set_rx_pcc(jme, dpi->attempt);
 		dpi->cur = dpi->attempt;
 		dpi->cnt = 0;
@@ -1182,9 +1182,9 @@ jme_shutdown_nic(struct jme_adapter *jme)
 }
 
 static void
-jme_pcc_tasklet(struct tasklet_struct *t)
+jme_pcc_bh_work(struct work_struct *work)
 {
-	struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
+	struct jme_adapter *jme = from_work(jme, work, pcc_bh_work);
 	struct net_device *netdev = jme->dev;
 
 	if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
@@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
 		jme_stop_shutdown_timer(jme);
 
 	jme_stop_pcc_timer(jme);
-	tasklet_disable(&jme->txclean_task);
-	tasklet_disable(&jme->rxclean_task);
-	tasklet_disable(&jme->rxempty_task);
+	disable_work_sync(&jme->txclean_bh_work);
+	disable_work_sync(&jme->rxclean_bh_work);
+	disable_work_sync(&jme->rxempty_bh_work);
 
 	if (netif_carrier_ok(netdev)) {
 		jme_disable_rx_engine(jme);
@@ -1304,7 +1304,7 @@ static void jme_link_change_work(struct work_struct *work)
 		rc = jme_setup_rx_resources(jme);
 		if (rc) {
 			pr_err("Allocating resources for RX error, Device STOPPED!\n");
-			goto out_enable_tasklet;
+			goto out_enable_bh_work;
 		}
 
 		rc = jme_setup_tx_resources(jme);
@@ -1326,22 +1326,22 @@ static void jme_link_change_work(struct work_struct *work)
 		jme_start_shutdown_timer(jme);
 	}
 
-	goto out_enable_tasklet;
+	goto out_enable_bh_work;
 
 err_out_free_rx_resources:
 	jme_free_rx_resources(jme);
-out_enable_tasklet:
-	tasklet_enable(&jme->txclean_task);
-	tasklet_enable(&jme->rxclean_task);
-	tasklet_enable(&jme->rxempty_task);
+out_enable_bh_work:
+	enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
+	enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
+	enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
 out:
 	atomic_inc(&jme->link_changing);
 }
 
 static void
-jme_rx_clean_tasklet(struct tasklet_struct *t)
+jme_rx_clean_bh_work(struct work_struct *work)
 {
-	struct jme_adapter *jme = from_tasklet(jme, t, rxclean_task);
+	struct jme_adapter *jme = from_work(jme, work, rxclean_bh_work);
 	struct dynpcc_info *dpi = &(jme->dpi);
 
 	jme_process_receive(jme, jme->rx_ring_size);
@@ -1374,9 +1374,9 @@ jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
 }
 
 static void
-jme_rx_empty_tasklet(struct tasklet_struct *t)
+jme_rx_empty_bh_work(struct work_struct *work)
 {
-	struct jme_adapter *jme = from_tasklet(jme, t, rxempty_task);
+	struct jme_adapter *jme = from_work(jme, work, rxempty_bh_work);
 
 	if (unlikely(atomic_read(&jme->link_changing) != 1))
 		return;
@@ -1386,7 +1386,7 @@ jme_rx_empty_tasklet(struct tasklet_struct *t)
 
 	netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
 
-	jme_rx_clean_tasklet(&jme->rxclean_task);
+	jme_rx_clean_bh_work(&jme->rxclean_bh_work);
 
 	while (atomic_read(&jme->rx_empty) > 0) {
 		atomic_dec(&jme->rx_empty);
@@ -1410,9 +1410,9 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
 
 }
 
-static void jme_tx_clean_tasklet(struct tasklet_struct *t)
+static void jme_tx_clean_bh_work(struct work_struct *work)
 {
-	struct jme_adapter *jme = from_tasklet(jme, t, txclean_task);
+	struct jme_adapter *jme = from_work(jme, work, txclean_bh_work);
 	struct jme_ring *txring = &(jme->txring[0]);
 	struct txdesc *txdesc = txring->desc;
 	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
@@ -1510,12 +1510,12 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
 
 	if (intrstat & INTR_TMINTR) {
 		jwrite32(jme, JME_IEVE, INTR_TMINTR);
-		tasklet_schedule(&jme->pcc_task);
+		queue_work(system_bh_wq, &jme->pcc_bh_work);
 	}
 
 	if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
 		jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
-		tasklet_schedule(&jme->txclean_task);
+		queue_work(system_bh_wq, &jme->txclean_bh_work);
 	}
 
 	if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
@@ -1538,9 +1538,9 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
 	} else {
 		if (intrstat & INTR_RX0EMP) {
 			atomic_inc(&jme->rx_empty);
-			tasklet_hi_schedule(&jme->rxempty_task);
+			queue_work(system_bh_highpri_wq, &jme->rxempty_bh_work);
 		} else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
-			tasklet_hi_schedule(&jme->rxclean_task);
+			queue_work(system_bh_highpri_wq, &jme->rxclean_bh_work);
 		}
 	}
 
@@ -1826,9 +1826,9 @@ jme_open(struct net_device *netdev)
 	jme_clear_pm_disable_wol(jme);
 	JME_NAPI_ENABLE(jme);
 
-	tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
-	tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
-	tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
+	INIT_WORK(&jme->txclean_bh_work, jme_tx_clean_bh_work);
+	INIT_WORK(&jme->rxclean_bh_work, jme_rx_clean_bh_work);
+	INIT_WORK(&jme->rxempty_bh_work, jme_rx_empty_bh_work);
 
 	rc = jme_request_irq(jme);
 	if (rc)
@@ -1914,9 +1914,9 @@ jme_close(struct net_device *netdev)
 	JME_NAPI_DISABLE(jme);
 
 	cancel_work_sync(&jme->linkch_task);
-	tasklet_kill(&jme->txclean_task);
-	tasklet_kill(&jme->rxclean_task);
-	tasklet_kill(&jme->rxempty_task);
+	cancel_work_sync(&jme->txclean_bh_work);
+	cancel_work_sync(&jme->rxclean_bh_work);
+	cancel_work_sync(&jme->rxempty_bh_work);
 
 	jme_disable_rx_engine(jme);
 	jme_disable_tx_engine(jme);
@@ -3020,7 +3020,7 @@ jme_init_one(struct pci_dev *pdev,
 	atomic_set(&jme->tx_cleaning, 1);
 	atomic_set(&jme->rx_empty, 1);
 
-	tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
+	INIT_WORK(&jme->pcc_bh_work, jme_pcc_bh_work);
 	INIT_WORK(&jme->linkch_task, jme_link_change_work);
 	jme->dpi.cur = PCC_P1;
 
@@ -3180,9 +3180,9 @@ jme_suspend(struct device *dev)
 	netif_stop_queue(netdev);
 	jme_stop_irq(jme);
 
-	tasklet_disable(&jme->txclean_task);
-	tasklet_disable(&jme->rxclean_task);
-	tasklet_disable(&jme->rxempty_task);
+	disable_work_sync(&jme->txclean_bh_work);
+	disable_work_sync(&jme->rxclean_bh_work);
+	disable_work_sync(&jme->rxempty_bh_work);
 
 	if (netif_carrier_ok(netdev)) {
 		if (test_bit(JME_FLAG_POLL, &jme->flags))
@@ -3198,9 +3198,9 @@ jme_suspend(struct device *dev)
 		jme->phylink = 0;
 	}
 
-	tasklet_enable(&jme->txclean_task);
-	tasklet_enable(&jme->rxclean_task);
-	tasklet_enable(&jme->rxempty_task);
+	enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
+	enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
+	enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
 
 	jme_powersave_phy(jme);
 
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 860494ff3714..73a8a1438340 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -406,11 +406,11 @@ struct jme_adapter {
 	spinlock_t		phy_lock;
 	spinlock_t		macaddr_lock;
 	spinlock_t		rxmcs_lock;
-	struct tasklet_struct	rxempty_task;
-	struct tasklet_struct	rxclean_task;
-	struct tasklet_struct	txclean_task;
+	struct work_struct	rxempty_bh_work;
+	struct work_struct	rxclean_bh_work;
+	struct work_struct	txclean_bh_work;
 	struct work_struct	linkch_task;
-	struct tasklet_struct	pcc_task;
+	struct work_struct	pcc_bh_work;
 	unsigned long		flags;
 	u32			reg_txcs;
 	u32			reg_txpfc;
-- 
2.34.1


^ permalink raw reply related	[flat|nested] 25+ messages in thread

* Re: [PATCH 13/15] net: jme: Convert tasklet API to new bottom half workqueue mechanism
  2024-06-21  5:05 ` [PATCH 13/15] net: jme: " Allen Pais
@ 2024-06-25 10:38   ` Paolo Abeni
  2024-07-01 10:13     ` Allen
  0 siblings, 1 reply; 25+ messages in thread
From: Paolo Abeni @ 2024-06-25 10:38 UTC (permalink / raw)
  To: Allen Pais, kuba, Guo-Fu Tseng, David S. Miller, Eric Dumazet
  Cc: jes, kda, cai.huoqing, dougmill, npiggin, christophe.leroy,
	aneesh.kumar, naveen.n.rao, nnac123, tlfalcon, marcin.s.wojtas,
	mlindner, stephen, nbd, sean.wang, Mark-MC.Lee, lorenzo,
	matthias.bgg, angelogioacchino.delregno, borisp, bryan.whitehead,
	UNGLinuxDriver, louis.peens, richardcochran, linux-rdma,
	linux-kernel, linux-acenic, linux-net-drivers, netdev

On Thu, 2024-06-20 at 22:05 -0700, Allen Pais wrote:
> Migrate tasklet APIs to the new bottom half workqueue mechanism. It
> replaces all occurrences of tasklet usage with the appropriate workqueue
> APIs throughout the jme driver. This transition ensures compatibility
> with the latest design and enhances performance.
> 
> Signed-off-by: Allen Pais <allen.lkml@gmail.com>
> ---
>  drivers/net/ethernet/jme.c | 72 +++++++++++++++++++-------------------
>  drivers/net/ethernet/jme.h |  8 ++---
>  2 files changed, 40 insertions(+), 40 deletions(-)
> 
> diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
> index b06e24562973..b1a92b851b3b 100644
> --- a/drivers/net/ethernet/jme.c
> +++ b/drivers/net/ethernet/jme.c
> @@ -1141,7 +1141,7 @@ jme_dynamic_pcc(struct jme_adapter *jme)
>  
>  	if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
>  		if (dpi->attempt < dpi->cur)
> -			tasklet_schedule(&jme->rxclean_task);
> +			queue_work(system_bh_wq, &jme->rxclean_bh_work);
>  		jme_set_rx_pcc(jme, dpi->attempt);
>  		dpi->cur = dpi->attempt;
>  		dpi->cnt = 0;
> @@ -1182,9 +1182,9 @@ jme_shutdown_nic(struct jme_adapter *jme)
>  }
>  
>  static void
> -jme_pcc_tasklet(struct tasklet_struct *t)
> +jme_pcc_bh_work(struct work_struct *work)
>  {
> -	struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
> +	struct jme_adapter *jme = from_work(jme, work, pcc_bh_work);
>  	struct net_device *netdev = jme->dev;
>  
>  	if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
> @@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
>  		jme_stop_shutdown_timer(jme);
>  
>  	jme_stop_pcc_timer(jme);
> -	tasklet_disable(&jme->txclean_task);
> -	tasklet_disable(&jme->rxclean_task);
> -	tasklet_disable(&jme->rxempty_task);
> +	disable_work_sync(&jme->txclean_bh_work);
> +	disable_work_sync(&jme->rxclean_bh_work);
> +	disable_work_sync(&jme->rxempty_bh_work);
>  
>  	if (netif_carrier_ok(netdev)) {
>  		jme_disable_rx_engine(jme);
> @@ -1304,7 +1304,7 @@ static void jme_link_change_work(struct work_struct *work)
>  		rc = jme_setup_rx_resources(jme);
>  		if (rc) {
>  			pr_err("Allocating resources for RX error, Device STOPPED!\n");
> -			goto out_enable_tasklet;
> +			goto out_enable_bh_work;
>  		}
>  
>  		rc = jme_setup_tx_resources(jme);
> @@ -1326,22 +1326,22 @@ static void jme_link_change_work(struct work_struct *work)
>  		jme_start_shutdown_timer(jme);
>  	}
>  
> -	goto out_enable_tasklet;
> +	goto out_enable_bh_work;
>  
>  err_out_free_rx_resources:
>  	jme_free_rx_resources(jme);
> -out_enable_tasklet:
> -	tasklet_enable(&jme->txclean_task);
> -	tasklet_enable(&jme->rxclean_task);
> -	tasklet_enable(&jme->rxempty_task);
> +out_enable_bh_work:
> +	enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> +	enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> +	enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);

This will unconditionally schedule the rxempty_bh_work and is AFAICS a
behavior change WRT the code prior to this patch.

In turn the rxempty_bh_work() will emit (almost unconditionally) the
'RX Queue Full!' message, so the change should be visible to the user.

I think you should queue the work only if it was queued at cancel time.
You likely need additional status to do that.
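
For illustration, a minimal sketch of the kind of state tracking this
suggests; the rxempty_bh_work_queued field is hypothetical here (the rest
of the thread converges on essentially this pattern), and it assumes
disable_work_sync() reports whether the work was pending:

	/* At disable time, remember whether the work was queued. */
	jme->rxempty_bh_work_queued = disable_work_sync(&jme->rxempty_bh_work);

	/* At enable time, re-queue it only if it was queued before. */
	if (jme->rxempty_bh_work_queued)
		enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
	else
		enable_work(&jme->rxempty_bh_work);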

Thanks,

Paolo


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 10/15] net: hinic: Convert tasklet API to new bottom half workqueue mechanism
  2024-06-21  5:05 ` [PATCH 10/15] net: hinic: " Allen Pais
@ 2024-06-25 10:47   ` Paolo Abeni
  0 siblings, 0 replies; 25+ messages in thread
From: Paolo Abeni @ 2024-06-25 10:47 UTC (permalink / raw)
  To: Allen Pais, kuba, Cai Huoqing, David S. Miller, Eric Dumazet
  Cc: jes, kda, dougmill, npiggin, christophe.leroy, aneesh.kumar,
	naveen.n.rao, nnac123, tlfalcon, cooldavid, marcin.s.wojtas,
	mlindner, stephen, nbd, sean.wang, Mark-MC.Lee, lorenzo,
	matthias.bgg, angelogioacchino.delregno, borisp, bryan.whitehead,
	UNGLinuxDriver, louis.peens, richardcochran, linux-rdma,
	linux-kernel, linux-acenic, linux-net-drivers, netdev

On Thu, 2024-06-20 at 22:05 -0700, Allen Pais wrote:
> @@ -968,9 +968,8 @@ void hinic_dump_ceq_info(struct hinic_hwdev *hwdev)
>  		ci = hinic_hwif_read_reg(hwdev->hwif, addr);
>  		addr = EQ_PROD_IDX_REG_ADDR(eq);
>  		pi = hinic_hwif_read_reg(hwdev->hwif, addr);
> -		dev_err(&hwdev->hwif->pdev->dev, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %d, ceqe: 0x%x\n",
> +		dev_err(&hwdev->hwif->pdev->dev, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, wrap: %d, ceqe: 0x%x\n",
>  			q_id, ci, eq->cons_idx, pi,
> -			eq->ceq_tasklet.state,

I guess here you can show work_pending() as a partial replacement for
the tasklet state.
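
For illustration, a sketch of what that could look like; the ceq_bh_work
field name is assumed from the conversion, and the trailing arguments stay
as in the existing call:

	dev_err(&hwdev->hwif->pdev->dev,
		"Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, work_pending: %d, wrap: %d, ceqe: 0x%x\n",
		q_id, ci, eq->cons_idx, pi,
		work_pending(&eq->ceq_bh_work),
		/* wrap and ceqe arguments unchanged from the existing call */
		...);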

Thanks!

Paolo



^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 13/15] net: jme: Convert tasklet API to new bottom half workqueue mechanism
  2024-06-25 10:38   ` Paolo Abeni
@ 2024-07-01 10:13     ` Allen
  2024-07-01 14:23       ` Paolo Abeni
  0 siblings, 1 reply; 25+ messages in thread
From: Allen @ 2024-07-01 10:13 UTC (permalink / raw)
  To: Paolo Abeni
  Cc: kuba, Guo-Fu Tseng, David S. Miller, Eric Dumazet, jes, kda,
	cai.huoqing, dougmill, npiggin, christophe.leroy, aneesh.kumar,
	naveen.n.rao, nnac123, tlfalcon, marcin.s.wojtas, mlindner,
	stephen, nbd, sean.wang, Mark-MC.Lee, lorenzo, matthias.bgg,
	angelogioacchino.delregno, borisp, bryan.whitehead,
	UNGLinuxDriver, louis.peens, richardcochran, linux-rdma,
	linux-kernel, linux-acenic, linux-net-drivers, netdev

> > Migrate tasklet APIs to the new bottom half workqueue mechanism. It
> > replaces all occurrences of tasklet usage with the appropriate workqueue
> > APIs throughout the jme driver. This transition ensures compatibility
> > with the latest design and enhances performance.
> >
> > Signed-off-by: Allen Pais <allen.lkml@gmail.com>
> > ---
> >  drivers/net/ethernet/jme.c | 72 +++++++++++++++++++-------------------
> >  drivers/net/ethernet/jme.h |  8 ++---
> >  2 files changed, 40 insertions(+), 40 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
> > index b06e24562973..b1a92b851b3b 100644
> > --- a/drivers/net/ethernet/jme.c
> > +++ b/drivers/net/ethernet/jme.c
> > @@ -1141,7 +1141,7 @@ jme_dynamic_pcc(struct jme_adapter *jme)
> >
> >       if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
> >               if (dpi->attempt < dpi->cur)
> > -                     tasklet_schedule(&jme->rxclean_task);
> > +                     queue_work(system_bh_wq, &jme->rxclean_bh_work);
> >               jme_set_rx_pcc(jme, dpi->attempt);
> >               dpi->cur = dpi->attempt;
> >               dpi->cnt = 0;
> > @@ -1182,9 +1182,9 @@ jme_shutdown_nic(struct jme_adapter *jme)
> >  }
> >
> >  static void
> > -jme_pcc_tasklet(struct tasklet_struct *t)
> > +jme_pcc_bh_work(struct work_struct *work)
> >  {
> > -     struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
> > +     struct jme_adapter *jme = from_work(jme, work, pcc_bh_work);
> >       struct net_device *netdev = jme->dev;
> >
> >       if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
> > @@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
> >               jme_stop_shutdown_timer(jme);
> >
> >       jme_stop_pcc_timer(jme);
> > -     tasklet_disable(&jme->txclean_task);
> > -     tasklet_disable(&jme->rxclean_task);
> > -     tasklet_disable(&jme->rxempty_task);
> > +     disable_work_sync(&jme->txclean_bh_work);
> > +     disable_work_sync(&jme->rxclean_bh_work);
> > +     disable_work_sync(&jme->rxempty_bh_work);
> >
> >       if (netif_carrier_ok(netdev)) {
> >               jme_disable_rx_engine(jme);
> > @@ -1304,7 +1304,7 @@ static void jme_link_change_work(struct work_struct *work)
> >               rc = jme_setup_rx_resources(jme);
> >               if (rc) {
> >                       pr_err("Allocating resources for RX error, Device STOPPED!\n");
> > -                     goto out_enable_tasklet;
> > +                     goto out_enable_bh_work;
> >               }
> >
> >               rc = jme_setup_tx_resources(jme);
> > @@ -1326,22 +1326,22 @@ static void jme_link_change_work(struct work_struct *work)
> >               jme_start_shutdown_timer(jme);
> >       }
> >
> > -     goto out_enable_tasklet;
> > +     goto out_enable_bh_work;
> >
> >  err_out_free_rx_resources:
> >       jme_free_rx_resources(jme);
> > -out_enable_tasklet:
> > -     tasklet_enable(&jme->txclean_task);
> > -     tasklet_enable(&jme->rxclean_task);
> > -     tasklet_enable(&jme->rxempty_task);
> > +out_enable_bh_work:
> > +     enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> > +     enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> > +     enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
>
> This will unconditionally schedule the rxempty_bh_work and is AFAICS a
> behavior change WRT the code prior to this patch.
>
> In turn the rxempty_bh_work() will emit (almost unconditionally) the
> 'RX Queue Full!' message, so the change should be visible to the user.
>
> I think you should queue the work only if it was queued at cancel time.
> You likely need additional status to do that.
>

 Thank you for taking the time to review. Now that it's been a week, I was
preparing to send out version 3. Before I do that, I want to make sure that
the approach below is acceptable.

diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b06e24562973..b3fc2e5c379f 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1141,7 +1141,7 @@ jme_dynamic_pcc(struct jme_adapter *jme)

        if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
                if (dpi->attempt < dpi->cur)
-                       tasklet_schedule(&jme->rxclean_task);
+                       queue_work(system_bh_wq, &jme->rxclean_bh_work);
                jme_set_rx_pcc(jme, dpi->attempt);
                dpi->cur = dpi->attempt;
                dpi->cnt = 0;
@@ -1182,9 +1182,9 @@ jme_shutdown_nic(struct jme_adapter *jme)
 }

 static void
-jme_pcc_tasklet(struct tasklet_struct *t)
+jme_pcc_bh_work(struct work_struct *work)
 {
-       struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
+       struct jme_adapter *jme = from_work(jme, work, pcc_bh_work);
        struct net_device *netdev = jme->dev;

        if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
@@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
                jme_stop_shutdown_timer(jme);

        jme_stop_pcc_timer(jme);
-       tasklet_disable(&jme->txclean_task);
-       tasklet_disable(&jme->rxclean_task);
-       tasklet_disable(&jme->rxempty_task);
+       disable_work_sync(&jme->txclean_bh_work);
+       disable_work_sync(&jme->rxclean_bh_work);
+       disable_work_sync(&jme->rxempty_bh_work);

        if (netif_carrier_ok(netdev)) {
                jme_disable_rx_engine(jme);
@@ -1304,7 +1304,7 @@ static void jme_link_change_work(struct work_struct *work)
                rc = jme_setup_rx_resources(jme);
                if (rc) {
                        pr_err("Allocating resources for RX error,
Device STOPPED!\n");
-                       goto out_enable_tasklet;
+                       goto out_enable_bh_work;
                }

                rc = jme_setup_tx_resources(jme);
@@ -1326,22 +1326,23 @@ static void jme_link_change_work(struct work_struct *work)
                jme_start_shutdown_timer(jme);
        }

-       goto out_enable_tasklet;
+       goto out_enable_bh_work;

 err_out_free_rx_resources:
        jme_free_rx_resources(jme);
-out_enable_tasklet:
-       tasklet_enable(&jme->txclean_task);
-       tasklet_enable(&jme->rxclean_task);
-       tasklet_enable(&jme->rxempty_task);
+out_enable_bh_work:
+       enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
+       enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
+       if (jme->rxempty_bh_work_queued)
+               enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
 out:
        atomic_inc(&jme->link_changing);
 }

 static void
-jme_rx_clean_tasklet(struct tasklet_struct *t)
+jme_rx_clean_bh_work(struct work_struct *work)
 {
-       struct jme_adapter *jme = from_tasklet(jme, t, rxclean_task);
+       struct jme_adapter *jme = from_work(jme, work, rxclean_bh_work);
        struct dynpcc_info *dpi = &(jme->dpi);

        jme_process_receive(jme, jme->rx_ring_size);
@@ -1374,9 +1375,9 @@ jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
 }

 static void
-jme_rx_empty_tasklet(struct tasklet_struct *t)
+jme_rx_empty_bh_work(struct work_struct *work)
 {
-       struct jme_adapter *jme = from_tasklet(jme, t, rxempty_task);
+       struct jme_adapter *jme = from_work(jme, work, rxempty_bh_work);

        if (unlikely(atomic_read(&jme->link_changing) != 1))
                return;
@@ -1386,7 +1387,7 @@ jme_rx_empty_tasklet(struct tasklet_struct *t)

        netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");

-       jme_rx_clean_tasklet(&jme->rxclean_task);
+       jme_rx_clean_bh_work(&jme->rxclean_bh_work);

        while (atomic_read(&jme->rx_empty) > 0) {
                atomic_dec(&jme->rx_empty);
@@ -1410,9 +1411,9 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)

 }

-static void jme_tx_clean_tasklet(struct tasklet_struct *t)
+static void jme_tx_clean_bh_work(struct work_struct *work)
 {
-       struct jme_adapter *jme = from_tasklet(jme, t, txclean_task);
+       struct jme_adapter *jme = from_work(jme, work, txclean_bh_work);
        struct jme_ring *txring = &(jme->txring[0]);
        struct txdesc *txdesc = txring->desc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
@@ -1510,12 +1511,12 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)

        if (intrstat & INTR_TMINTR) {
                jwrite32(jme, JME_IEVE, INTR_TMINTR);
-               tasklet_schedule(&jme->pcc_task);
+               queue_work(system_bh_wq, &jme->pcc_bh_work);
        }

        if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
                jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
-               tasklet_schedule(&jme->txclean_task);
+               queue_work(system_bh_wq, &jme->txclean_bh_work);
        }

        if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
@@ -1538,9 +1539,9 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
        } else {
                if (intrstat & INTR_RX0EMP) {
                        atomic_inc(&jme->rx_empty);
-                       tasklet_hi_schedule(&jme->rxempty_task);
+                       queue_work(system_bh_highpri_wq, &jme->rxempty_bh_work);
                } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
-                       tasklet_hi_schedule(&jme->rxclean_task);
+                       queue_work(system_bh_highpri_wq, &jme->rxclean_bh_work);
                }
        }

@@ -1826,9 +1827,9 @@ jme_open(struct net_device *netdev)
        jme_clear_pm_disable_wol(jme);
        JME_NAPI_ENABLE(jme);

-       tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
-       tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
-       tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
+       INIT_WORK(&jme->txclean_bh_work, jme_tx_clean_bh_work);
+       INIT_WORK(&jme->rxclean_bh_work, jme_rx_clean_bh_work);
+       INIT_WORK(&jme->rxempty_bh_work, jme_rx_empty_bh_work);

        rc = jme_request_irq(jme);
        if (rc)
@@ -1914,9 +1915,10 @@ jme_close(struct net_device *netdev)
        JME_NAPI_DISABLE(jme);

        cancel_work_sync(&jme->linkch_task);
-       tasklet_kill(&jme->txclean_task);
-       tasklet_kill(&jme->rxclean_task);
-       tasklet_kill(&jme->rxempty_task);
+       cancel_work_sync(&jme->txclean_bh_work);
+       cancel_work_sync(&jme->rxclean_bh_work);
+       jme->rxempty_bh_work_queued = false;
+       cancel_work_sync(&jme->rxempty_bh_work);

        jme_disable_rx_engine(jme);
        jme_disable_tx_engine(jme);
@@ -3020,7 +3022,7 @@ jme_init_one(struct pci_dev *pdev,
        atomic_set(&jme->tx_cleaning, 1);
        atomic_set(&jme->rx_empty, 1);

-       tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
+       INIT_WORK(&jme->pcc_bh_work, jme_pcc_bh_work);
        INIT_WORK(&jme->linkch_task, jme_link_change_work);
        jme->dpi.cur = PCC_P1;

@@ -3180,9 +3182,9 @@ jme_suspend(struct device *dev)
        netif_stop_queue(netdev);
        jme_stop_irq(jme);

-       tasklet_disable(&jme->txclean_task);
-       tasklet_disable(&jme->rxclean_task);
-       tasklet_disable(&jme->rxempty_task);
+       disable_work_sync(&jme->txclean_bh_work);
+       disable_work_sync(&jme->rxclean_bh_work);
+       disable_work_sync(&jme->rxempty_bh_work);

        if (netif_carrier_ok(netdev)) {
                if (test_bit(JME_FLAG_POLL, &jme->flags))
@@ -3198,9 +3200,10 @@ jme_suspend(struct device *dev)
                jme->phylink = 0;
        }

-       tasklet_enable(&jme->txclean_task);
-       tasklet_enable(&jme->rxclean_task);
-       tasklet_enable(&jme->rxempty_task);
+       enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
+       enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
+       jme->rxempty_bh_work_queued = true;
+       enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);

        jme_powersave_phy(jme);

diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 860494ff3714..44aaf7625dc3 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -406,11 +406,12 @@ struct jme_adapter {
        spinlock_t              phy_lock;
        spinlock_t              macaddr_lock;
        spinlock_t              rxmcs_lock;
-       struct tasklet_struct   rxempty_task;
-       struct tasklet_struct   rxclean_task;
-       struct tasklet_struct   txclean_task;
+       struct work_struct      rxempty_bh_work;
+       struct work_struct      rxclean_bh_work;
+       struct work_struct      txclean_bh_work;
+       bool                    rxempty_bh_work_queued;
        struct work_struct      linkch_task;
-       struct tasklet_struct   pcc_task;
+       struct work_struct      pcc_bh_work;
        unsigned long           flags;
        u32                     reg_txcs;
        u32                     reg_txpfc;



  Do we need a flag for rxclean and txclean too?

Thanks,
Allen

> Thanks,
>
> Paolo
>


-- 
       - Allen

^ permalink raw reply related	[flat|nested] 25+ messages in thread

* Re: [PATCH 13/15] net: jme: Convert tasklet API to new bottom half workqueue mechanism
  2024-07-01 10:13     ` Allen
@ 2024-07-01 14:23       ` Paolo Abeni
  2024-07-15 17:50         ` Allen
  0 siblings, 1 reply; 25+ messages in thread
From: Paolo Abeni @ 2024-07-01 14:23 UTC (permalink / raw)
  To: Allen
  Cc: kuba, Guo-Fu Tseng, David S. Miller, Eric Dumazet, jes, kda,
	cai.huoqing, dougmill, npiggin, christophe.leroy, aneesh.kumar,
	naveen.n.rao, nnac123, tlfalcon, marcin.s.wojtas, mlindner,
	stephen, nbd, sean.wang, Mark-MC.Lee, lorenzo, matthias.bgg,
	angelogioacchino.delregno, borisp, bryan.whitehead,
	UNGLinuxDriver, louis.peens, richardcochran, linux-rdma,
	linux-kernel, linux-acenic, linux-net-drivers, netdev

On Mon, 2024-07-01 at 03:13 -0700, Allen wrote:
> > > @@ -1326,22 +1326,22 @@ static void jme_link_change_work(struct work_struct *work)
> > >               jme_start_shutdown_timer(jme);
> > >       }
> > > 
> > > -     goto out_enable_tasklet;
> > > +     goto out_enable_bh_work;
> > > 
> > >  err_out_free_rx_resources:
> > >       jme_free_rx_resources(jme);
> > > -out_enable_tasklet:
> > > -     tasklet_enable(&jme->txclean_task);
> > > -     tasklet_enable(&jme->rxclean_task);
> > > -     tasklet_enable(&jme->rxempty_task);
> > > +out_enable_bh_work:
> > > +     enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> > > +     enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> > > +     enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
> > 
> > This will unconditionally schedule the rxempty_bh_work and is AFAICS a
> > behavior change WRT the code prior to this patch.
> > 
> > In turn the rxempty_bh_work() will emit (almost unconditionally) the
> > 'RX Queue Full!' message, so the change should be visible to the user.
> > 
> > I think you should queue the work only if it was queued at cancel time.
> > You likely need additional status to do that.
> > 
> 
>  Thank you for taking the time to review. Now that it's been a week, I was
> preparing to send out version 3. Before I do that, I want to make sure that
> the approach below is acceptable.

I _think_ the following does not track the  rxempty_bh_work 'queued'
status fully/correctly.

> @@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
>                 jme_stop_shutdown_timer(jme);
> 
>         jme_stop_pcc_timer(jme);
> -       tasklet_disable(&jme->txclean_task);
> -       tasklet_disable(&jme->rxclean_task);
> -       tasklet_disable(&jme->rxempty_task);
> +       disable_work_sync(&jme->txclean_bh_work);
> +       disable_work_sync(&jme->rxclean_bh_work);
> +       disable_work_sync(&jme->rxempty_bh_work);

I think the above should be:

	  jme->rxempty_bh_work_queued = disable_work_sync(&jme->rxempty_bh_work);

[...]
> @@ -1326,22 +1326,23 @@ static void jme_link_change_work(struct work_struct *work)
>                 jme_start_shutdown_timer(jme);
>         }
> 
> -       goto out_enable_tasklet;
> +       goto out_enable_bh_work;
> 
>  err_out_free_rx_resources:
>         jme_free_rx_resources(jme);
> -out_enable_tasklet:
> -       tasklet_enable(&jme->txclean_task);
> -       tasklet_enable(&jme->rxclean_task);
> -       tasklet_enable(&jme->rxempty_task);
> +out_enable_bh_work:
> +       enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> +       enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> +       if (jme->rxempty_bh_work_queued)
> +               enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);

Missing:

	  else
		enable_work(system_bh_wq, &jme->rxempty_bh_work);

[...]
> @@ -3180,9 +3182,9 @@ jme_suspend(struct device *dev)
>         netif_stop_queue(netdev);
>         jme_stop_irq(jme);
> 
> -       tasklet_disable(&jme->txclean_task);
> -       tasklet_disable(&jme->rxclean_task);
> -       tasklet_disable(&jme->rxempty_task);
> +       disable_work_sync(&jme->txclean_bh_work);
> +       disable_work_sync(&jme->rxclean_bh_work);
> +       disable_work_sync(&jme->rxempty_bh_work);

should be:

	  jme->rxempty_bh_work_queued = disable_work_sync(&jme->rxempty_bh_work);	


> 
> @@ -3198,9 +3200,10 @@ jme_suspend(struct device *dev)
>                 jme->phylink = 0;
>         }
> 
> -       tasklet_enable(&jme->txclean_task);
> -       tasklet_enable(&jme->rxclean_task);
> -       tasklet_enable(&jme->rxempty_task);
> +       enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> +       enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> +       jme->rxempty_bh_work_queued = true;
> +       enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);

should be:

	if (jme->rxempty_bh_work_queued)
        	enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
	else
		enable_work(system_bh_wq, &jme->rxempty_bh_work);

I think the above ones are the only places where you need to touch
'rxempty_bh_work_queued'.


[...]
>   Do we need a flag for rxclean and txclean too?

Functionally speaking I don't think it will be necessary, as
rxclean_bh_work() and txclean_bh_work() don't emit warnings on spurious
invocation.

Thanks,

Paolo



^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 13/15] net: jme: Convert tasklet API to new bottom half workqueue mechanism
  2024-07-01 14:23       ` Paolo Abeni
@ 2024-07-15 17:50         ` Allen
  2024-07-16  8:47           ` Paolo Abeni
  0 siblings, 1 reply; 25+ messages in thread
From: Allen @ 2024-07-15 17:50 UTC (permalink / raw)
  To: Paolo Abeni
  Cc: kuba, Guo-Fu Tseng, David S. Miller, Eric Dumazet, jes, kda,
	cai.huoqing, dougmill, npiggin, christophe.leroy, aneesh.kumar,
	naveen.n.rao, nnac123, tlfalcon, marcin.s.wojtas, mlindner,
	stephen, nbd, sean.wang, Mark-MC.Lee, lorenzo, matthias.bgg,
	angelogioacchino.delregno, borisp, bryan.whitehead,
	UNGLinuxDriver, louis.peens, richardcochran, linux-rdma,
	linux-kernel, linux-acenic, linux-net-drivers, netdev

> > > > @@ -1326,22 +1326,22 @@ static void jme_link_change_work(struct work_struct *work)
> > > >               jme_start_shutdown_timer(jme);
> > > >       }
> > > >
> > > > -     goto out_enable_tasklet;
> > > > +     goto out_enable_bh_work;
> > > >
> > > >  err_out_free_rx_resources:
> > > >       jme_free_rx_resources(jme);
> > > > -out_enable_tasklet:
> > > > -     tasklet_enable(&jme->txclean_task);
> > > > -     tasklet_enable(&jme->rxclean_task);
> > > > -     tasklet_enable(&jme->rxempty_task);
> > > > +out_enable_bh_work:
> > > > +     enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> > > > +     enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> > > > +     enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
> > >
> > > This will unconditionally schedule the rxempty_bh_work and is AFAICS a
> > > behavior change WRT the code prior to this patch.
> > >
> > > In turn the rxempty_bh_work() will emit (almost unconditionally) the
> > > 'RX Queue Full!' message, so the change should be visible to the user.
> > >
> > > I think you should queue the work only if it was queued at cancel time.
> > > You likely need additional status to do that.
> > >
> >
> >  Thank you for taking the time to review. Now that it's been a week, I was
> > preparing to send out version 3. Before I do that, I want to make sure that
> > the approach below is acceptable.
>
> I _think_ the following does not track the  rxempty_bh_work 'queued'
> status fully/correctly.
>
> > @@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
> >                 jme_stop_shutdown_timer(jme);
> >
> >         jme_stop_pcc_timer(jme);
> > -       tasklet_disable(&jme->txclean_task);
> > -       tasklet_disable(&jme->rxclean_task);
> > -       tasklet_disable(&jme->rxempty_task);
> > +       disable_work_sync(&jme->txclean_bh_work);
> > +       disable_work_sync(&jme->rxclean_bh_work);
> > +       disable_work_sync(&jme->rxempty_bh_work);
>
> I think the above should be:
>
>           jme->rxempty_bh_work_queued = disable_work_sync(&jme->rxempty_bh_work);
>
> [...]
> > @@ -1326,22 +1326,23 @@ static void jme_link_change_work(struct work_struct *work)
> >                 jme_start_shutdown_timer(jme);
> >         }
> >
> > -       goto out_enable_tasklet;
> > +       goto out_enable_bh_work;
> >
> >  err_out_free_rx_resources:
> >         jme_free_rx_resources(jme);
> > -out_enable_tasklet:
> > -       tasklet_enable(&jme->txclean_task);
> > -       tasklet_enable(&jme->rxclean_task);
> > -       tasklet_enable(&jme->rxempty_task);
> > +out_enable_bh_work:
> > +       enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> > +       enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> > +       if (jme->rxempty_bh_work_queued)
> > +               enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
>
> Missing:
>
>           else
>                 enable_work(system_bh_wq, &jme->rxempty_bh_work);
>
> [...]
> > @@ -3180,9 +3182,9 @@ jme_suspend(struct device *dev)
> >         netif_stop_queue(netdev);
> >         jme_stop_irq(jme);
> >
> > -       tasklet_disable(&jme->txclean_task);
> > -       tasklet_disable(&jme->rxclean_task);
> > -       tasklet_disable(&jme->rxempty_task);
> > +       disable_work_sync(&jme->txclean_bh_work);
> > +       disable_work_sync(&jme->rxclean_bh_work);
> > +       disable_work_sync(&jme->rxempty_bh_work);
>
> should be:
>
>           jme->rxempty_bh_work_queued = disable_work_sync(&jme->rxempty_bh_work);
>
>
> >
> > @@ -3198,9 +3200,10 @@ jme_suspend(struct device *dev)
> >                 jme->phylink = 0;
> >         }
> >
> > -       tasklet_enable(&jme->txclean_task);
> > -       tasklet_enable(&jme->rxclean_task);
> > -       tasklet_enable(&jme->rxempty_task);
> > +       enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> > +       enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> > +       jme->rxempty_bh_work_queued = true;
> > +       enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
>
> should be:
>
>         if (jme->rxempty_bh_work_queued)
>                 enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
>         else
>                 enable_work(system_bh_wq, &jme->rxempty_bh_work);
>
> I think the above ones are the only places where you need to touch
> 'rxempty_bh_work_queued'.
>
>
> [...]
> >   Do we need a flag for rxclean and txclean too?
>
> Functionally speaking I don't think it will be necessary, as
> rxclean_bh_work() and txclean_bh_work() don't emit warnings on spurious
> invocation.
>
> Thanks,
>
> Paolo
>

Thank you very much. Will send out v3 later today with these changes.
Note, it will be as follows, since enable_work() does not take a workqueue argument.

+  if (jme->rxempty_bh_work_queued)
+                 enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
+         else
-                 enable_work(system_bh_wq, &jme->rxempty_bh_work);
+                enable_work(&jme->rxempty_bh_work);
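
For context, a sketch of the helper signatures assumed in the exchange
above (see include/linux/workqueue.h for the authoritative declarations):

	/*
	 * disable_work_sync() reports whether the work was pending, which is
	 * what rxempty_bh_work_queued records at disable time; enable_work()
	 * only lifts the disable count and takes no workqueue argument.
	 */
	bool disable_work_sync(struct work_struct *work);
	bool enable_work(struct work_struct *work);
	/* enable_and_queue_work(wq, work) is a macro and takes the target workqueue. */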

Thanks,
Allen
>


-- 
       - Allen

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 13/15] net: jme: Convert tasklet API to new bottom half workqueue mechanism
  2024-07-15 17:50         ` Allen
@ 2024-07-16  8:47           ` Paolo Abeni
  2024-07-17 16:55             ` Allen
  0 siblings, 1 reply; 25+ messages in thread
From: Paolo Abeni @ 2024-07-16  8:47 UTC (permalink / raw)
  To: Allen
  Cc: kuba, Guo-Fu Tseng, David S. Miller, Eric Dumazet, jes, kda,
	cai.huoqing, dougmill, npiggin, christophe.leroy, aneesh.kumar,
	naveen.n.rao, nnac123, tlfalcon, marcin.s.wojtas, mlindner,
	stephen, nbd, sean.wang, Mark-MC.Lee, lorenzo, matthias.bgg,
	angelogioacchino.delregno, borisp, bryan.whitehead,
	UNGLinuxDriver, louis.peens, richardcochran, linux-rdma,
	linux-kernel, linux-acenic, linux-net-drivers, netdev

On 7/15/24 19:50, Allen wrote:
> Thank you very much. Will send out v3 later today with these changes.
> Note, it will be as follows, since enable_work() does not take a workqueue argument.
> 
> +  if (jme->rxempty_bh_work_queued)
> +                 enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
> +         else
> -                 enable_work(system_bh_wq, &jme->rxempty_bh_work);
> +                enable_work(&jme->rxempty_bh_work);

Yup, sorry I was very hasty.

More important topic: net-next is currently closed for the merge window,
so you will have to wait to post the new revision of this series until we
re-open net-next in ~2 weeks.

Thanks,

Paolo


^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [PATCH 13/15] net: jme: Convert tasklet API to new bottom half workqueue mechanism
  2024-07-16  8:47           ` Paolo Abeni
@ 2024-07-17 16:55             ` Allen
  0 siblings, 0 replies; 25+ messages in thread
From: Allen @ 2024-07-17 16:55 UTC (permalink / raw)
  To: Paolo Abeni
  Cc: kuba, Guo-Fu Tseng, David S. Miller, Eric Dumazet, jes, kda,
	cai.huoqing, dougmill, npiggin, christophe.leroy, aneesh.kumar,
	naveen.n.rao, nnac123, tlfalcon, marcin.s.wojtas, mlindner,
	stephen, nbd, sean.wang, Mark-MC.Lee, lorenzo, matthias.bgg,
	angelogioacchino.delregno, borisp, bryan.whitehead,
	UNGLinuxDriver, louis.peens, richardcochran, linux-rdma,
	linux-kernel, linux-acenic, linux-net-drivers, netdev

> > Thank you very much. Will send out v3 later today with these changes.
> > Note, it will be as follows, since enable_work() does not take a workqueue argument.
> >
> > +  if (jme->rxempty_bh_work_queued)
> > +                 enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
> > +         else
> > -                 enable_work(system_bh_wq, &jme->rxempty_bh_work);
> > +                enable_work(&jme->rxempty_bh_work);
>
> Yup, sorry I was very hasty.
>
> More important topic: net-next is currently closed for the merge window,
> so you will have to wait to post the new revision of this series until we
> re-open net-next in ~2 weeks.
>

Noted, and thank you for the heads-up.
Meanwhile, I will prepare the second series, which can go out for review in
two weeks.

Thanks.


       - Allen

^ permalink raw reply	[flat|nested] 25+ messages in thread

end of thread, other threads:[~2024-07-17 16:55 UTC | newest]

Thread overview: 25+ messages
     [not found] <20240621050525.3720069-1-allen.lkml@gmail.com>
2024-06-21  5:05 ` [PATCH 01/15] net: alteon: Convert tasklet API to new bottom half workqueue mechanism Allen Pais
2024-06-21  5:05 ` [PATCH 02/15] net: xgbe: " Allen Pais
2024-06-21  5:05 ` [PATCH 03/15] net: cnic: " Allen Pais
2024-06-21  5:05 ` [PATCH 04/15] net: macb: " Allen Pais
2024-06-21  5:05 ` [PATCH 05/15] net: cavium/liquidio: " Allen Pais
2024-06-21 11:45   ` Sunil Kovvuri Goutham
2024-06-21  5:05 ` [PATCH 06/15] net: octeon: " Allen Pais
2024-06-21  5:05 ` [PATCH 07/15] net: thunderx: " Allen Pais
2024-06-21 11:49   ` Sunil Kovvuri Goutham
2024-06-21  5:05 ` [PATCH 08/15] net: chelsio: " Allen Pais
2024-06-21  5:05 ` [PATCH 09/15] net: sundance: " Allen Pais
2024-06-21  5:05 ` [PATCH 10/15] net: hinic: " Allen Pais
2024-06-25 10:47   ` Paolo Abeni
2024-06-21  5:05 ` [PATCH 11/15] net: ehea: " Allen Pais
2024-06-21  5:05 ` [PATCH 12/15] net: ibmvnic: " Allen Pais
2024-06-21  5:05 ` [PATCH 13/15] net: jme: " Allen Pais
2024-06-25 10:38   ` Paolo Abeni
2024-07-01 10:13     ` Allen
2024-07-01 14:23       ` Paolo Abeni
2024-07-15 17:50         ` Allen
2024-07-16  8:47           ` Paolo Abeni
2024-07-17 16:55             ` Allen
2024-06-21  5:05 ` [PATCH 14/15] net: marvell: " Allen Pais
2024-06-21  5:05 ` [PATCH 15/15] net: mtk-wed: " Allen Pais
2024-06-21 18:39 [PATCH 00/15] ethernet: Convert from tasklet to BH workqueue Allen Pais
2024-06-21 18:39 ` [PATCH 13/15] net: jme: Convert tasklet API to new bottom half workqueue mechanism Allen Pais
