From: Allen Pais <allen.lkml@gmail.com>
To: kuba@kernel.org, Guo-Fu Tseng <cooldavid@cooldavid.org>,
"David S. Miller" <davem@davemloft.net>,
Eric Dumazet <edumazet@google.com>,
Paolo Abeni <pabeni@redhat.com>
Cc: jes@trained-monkey.org, kda@linux-powerpc.org,
cai.huoqing@linux.dev, dougmill@linux.ibm.com, npiggin@gmail.com,
christophe.leroy@csgroup.eu, aneesh.kumar@kernel.org,
naveen.n.rao@linux.ibm.com, nnac123@linux.ibm.com,
tlfalcon@linux.ibm.com, marcin.s.wojtas@gmail.com,
mlindner@marvell.com, stephen@networkplumber.org, nbd@nbd.name,
sean.wang@mediatek.com, Mark-MC.Lee@mediatek.com,
lorenzo@kernel.org, matthias.bgg@gmail.com,
angelogioacchino.delregno@collabora.com, borisp@nvidia.com,
bryan.whitehead@microchip.com, UNGLinuxDriver@microchip.com,
louis.peens@corigine.com, richardcochran@gmail.com,
linux-rdma@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-acenic@sunsite.dk, linux-net-drivers@amd.com,
netdev@vger.kernel.org, Allen Pais <allen.lkml@gmail.com>
Subject: [net-next v3 13/15] net: jme: Convert tasklet API to new bottom half workqueue mechanism
Date: Tue, 30 Jul 2024 11:34:01 -0700 [thread overview]
Message-ID: <20240730183403.4176544-14-allen.lkml@gmail.com> (raw)
In-Reply-To: <20240730183403.4176544-1-allen.lkml@gmail.com>
Migrate the tasklet APIs to the new bottom-half workqueue mechanism,
replacing all occurrences of tasklet usage with the appropriate
workqueue APIs throughout the jme driver. This transition ensures
compatibility with the latest design and enhances performance.
Re-queue the work on enable only if it was queued at the time it was
disabled. Introduce the rxempty_bh_work_queued flag to track this, as
suggested by Paolo Abeni.
Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
drivers/net/ethernet/jme.c | 80 +++++++++++++++++++++-----------------
drivers/net/ethernet/jme.h | 9 +++--
2 files changed, 49 insertions(+), 40 deletions(-)
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b06e24562973..bdaeaeb477e4 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1141,7 +1141,7 @@ jme_dynamic_pcc(struct jme_adapter *jme)
if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
if (dpi->attempt < dpi->cur)
- tasklet_schedule(&jme->rxclean_task);
+ queue_work(system_bh_wq, &jme->rxclean_bh_work);
jme_set_rx_pcc(jme, dpi->attempt);
dpi->cur = dpi->attempt;
dpi->cnt = 0;
@@ -1182,9 +1182,9 @@ jme_shutdown_nic(struct jme_adapter *jme)
}
static void
-jme_pcc_tasklet(struct tasklet_struct *t)
+jme_pcc_bh_work(struct work_struct *work)
{
- struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
+ struct jme_adapter *jme = from_work(jme, work, pcc_bh_work);
struct net_device *netdev = jme->dev;
if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
@@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
jme_stop_shutdown_timer(jme);
jme_stop_pcc_timer(jme);
- tasklet_disable(&jme->txclean_task);
- tasklet_disable(&jme->rxclean_task);
- tasklet_disable(&jme->rxempty_task);
+ disable_work_sync(&jme->txclean_bh_work);
+ disable_work_sync(&jme->rxclean_bh_work);
+ jme->rxempty_bh_work_queued = disable_work_sync(&jme->rxempty_bh_work);
if (netif_carrier_ok(netdev)) {
jme_disable_rx_engine(jme);
@@ -1304,7 +1304,7 @@ static void jme_link_change_work(struct work_struct *work)
rc = jme_setup_rx_resources(jme);
if (rc) {
pr_err("Allocating resources for RX error, Device STOPPED!\n");
- goto out_enable_tasklet;
+ goto out_enable_bh_work;
}
rc = jme_setup_tx_resources(jme);
@@ -1326,22 +1326,26 @@ static void jme_link_change_work(struct work_struct *work)
jme_start_shutdown_timer(jme);
}
- goto out_enable_tasklet;
+ goto out_enable_bh_work;
err_out_free_rx_resources:
jme_free_rx_resources(jme);
-out_enable_tasklet:
- tasklet_enable(&jme->txclean_task);
- tasklet_enable(&jme->rxclean_task);
- tasklet_enable(&jme->rxempty_task);
+out_enable_bh_work:
+ enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
+ enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
+ if (jme->rxempty_bh_work_queued)
+ enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
+ else
+ enable_work(&jme->rxempty_bh_work);
+
out:
atomic_inc(&jme->link_changing);
}
static void
-jme_rx_clean_tasklet(struct tasklet_struct *t)
+jme_rx_clean_bh_work(struct work_struct *work)
{
- struct jme_adapter *jme = from_tasklet(jme, t, rxclean_task);
+ struct jme_adapter *jme = from_work(jme, work, rxclean_bh_work);
struct dynpcc_info *dpi = &(jme->dpi);
jme_process_receive(jme, jme->rx_ring_size);
@@ -1374,9 +1378,9 @@ jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
}
static void
-jme_rx_empty_tasklet(struct tasklet_struct *t)
+jme_rx_empty_bh_work(struct work_struct *work)
{
- struct jme_adapter *jme = from_tasklet(jme, t, rxempty_task);
+ struct jme_adapter *jme = from_work(jme, work, rxempty_bh_work);
if (unlikely(atomic_read(&jme->link_changing) != 1))
return;
@@ -1386,7 +1390,7 @@ jme_rx_empty_tasklet(struct tasklet_struct *t)
netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
- jme_rx_clean_tasklet(&jme->rxclean_task);
+ jme_rx_clean_bh_work(&jme->rxclean_bh_work);
while (atomic_read(&jme->rx_empty) > 0) {
atomic_dec(&jme->rx_empty);
@@ -1410,9 +1414,9 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
}
-static void jme_tx_clean_tasklet(struct tasklet_struct *t)
+static void jme_tx_clean_bh_work(struct work_struct *work)
{
- struct jme_adapter *jme = from_tasklet(jme, t, txclean_task);
+ struct jme_adapter *jme = from_work(jme, work, txclean_bh_work);
struct jme_ring *txring = &(jme->txring[0]);
struct txdesc *txdesc = txring->desc;
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
@@ -1510,12 +1514,12 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
if (intrstat & INTR_TMINTR) {
jwrite32(jme, JME_IEVE, INTR_TMINTR);
- tasklet_schedule(&jme->pcc_task);
+ queue_work(system_bh_wq, &jme->pcc_bh_work);
}
if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
- tasklet_schedule(&jme->txclean_task);
+ queue_work(system_bh_wq, &jme->txclean_bh_work);
}
if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
@@ -1538,9 +1542,9 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
} else {
if (intrstat & INTR_RX0EMP) {
atomic_inc(&jme->rx_empty);
- tasklet_hi_schedule(&jme->rxempty_task);
+ queue_work(system_bh_highpri_wq, &jme->rxempty_bh_work);
} else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
- tasklet_hi_schedule(&jme->rxclean_task);
+ queue_work(system_bh_highpri_wq, &jme->rxclean_bh_work);
}
}
@@ -1826,9 +1830,9 @@ jme_open(struct net_device *netdev)
jme_clear_pm_disable_wol(jme);
JME_NAPI_ENABLE(jme);
- tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
- tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
- tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
+ INIT_WORK(&jme->txclean_bh_work, jme_tx_clean_bh_work);
+ INIT_WORK(&jme->rxclean_bh_work, jme_rx_clean_bh_work);
+ INIT_WORK(&jme->rxempty_bh_work, jme_rx_empty_bh_work);
rc = jme_request_irq(jme);
if (rc)
@@ -1914,9 +1918,10 @@ jme_close(struct net_device *netdev)
JME_NAPI_DISABLE(jme);
cancel_work_sync(&jme->linkch_task);
- tasklet_kill(&jme->txclean_task);
- tasklet_kill(&jme->rxclean_task);
- tasklet_kill(&jme->rxempty_task);
+ cancel_work_sync(&jme->txclean_bh_work);
+ cancel_work_sync(&jme->rxclean_bh_work);
+ jme->rxempty_bh_work_queued = false;
+ cancel_work_sync(&jme->rxempty_bh_work);
jme_disable_rx_engine(jme);
jme_disable_tx_engine(jme);
@@ -3020,7 +3025,7 @@ jme_init_one(struct pci_dev *pdev,
atomic_set(&jme->tx_cleaning, 1);
atomic_set(&jme->rx_empty, 1);
- tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
+ INIT_WORK(&jme->pcc_bh_work, jme_pcc_bh_work);
INIT_WORK(&jme->linkch_task, jme_link_change_work);
jme->dpi.cur = PCC_P1;
@@ -3180,9 +3185,9 @@ jme_suspend(struct device *dev)
netif_stop_queue(netdev);
jme_stop_irq(jme);
- tasklet_disable(&jme->txclean_task);
- tasklet_disable(&jme->rxclean_task);
- tasklet_disable(&jme->rxempty_task);
+ disable_work_sync(&jme->txclean_bh_work);
+ disable_work_sync(&jme->rxclean_bh_work);
+ jme->rxempty_bh_work_queued = disable_work_sync(&jme->rxempty_bh_work);
if (netif_carrier_ok(netdev)) {
if (test_bit(JME_FLAG_POLL, &jme->flags))
@@ -3198,9 +3203,12 @@ jme_suspend(struct device *dev)
jme->phylink = 0;
}
- tasklet_enable(&jme->txclean_task);
- tasklet_enable(&jme->rxclean_task);
- tasklet_enable(&jme->rxempty_task);
+ enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
+ enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
+ if (jme->rxempty_bh_work_queued)
+ enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
+ else
+ enable_work(&jme->rxempty_bh_work);
jme_powersave_phy(jme);
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 860494ff3714..44aaf7625dc3 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -406,11 +406,12 @@ struct jme_adapter {
spinlock_t phy_lock;
spinlock_t macaddr_lock;
spinlock_t rxmcs_lock;
- struct tasklet_struct rxempty_task;
- struct tasklet_struct rxclean_task;
- struct tasklet_struct txclean_task;
+ struct work_struct rxempty_bh_work;
+ struct work_struct rxclean_bh_work;
+ struct work_struct txclean_bh_work;
+ bool rxempty_bh_work_queued;
struct work_struct linkch_task;
- struct tasklet_struct pcc_task;
+ struct work_struct pcc_bh_work;
unsigned long flags;
u32 reg_txcs;
u32 reg_txpfc;
--
2.34.1
next prev parent reply other threads:[~2024-07-30 18:34 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-07-30 18:33 [net-next v3 00/15] ethernet: Convert from tasklet to BH workqueue Allen Pais
2024-07-30 18:33 ` [net-next v3 01/15] net: alteon: Convert tasklet API to new bottom half workqueue mechanism Allen Pais
2024-07-30 18:33 ` [net-next v3 02/15] net: xgbe: " Allen Pais
2024-07-30 18:33 ` [net-next v3 03/15] net: cnic: " Allen Pais
2024-07-30 18:33 ` [net-next v3 04/15] net: macb: " Allen Pais
2024-07-30 18:33 ` [net-next v3 05/15] net: cavium/liquidio: " Allen Pais
2024-08-01 2:08 ` Jakub Kicinski
2024-08-01 22:00 ` Allen
2024-08-02 0:57 ` Jakub Kicinski
2024-08-05 17:23 ` Allen
2024-08-05 19:39 ` Jakub Kicinski
2024-08-07 3:15 ` Allen
2024-08-07 14:37 ` Jakub Kicinski
2024-08-09 2:31 ` Allen
2024-08-10 3:36 ` Jakub Kicinski
2024-08-15 16:45 ` Allen
2024-08-15 23:49 ` Jakub Kicinski
2024-08-17 16:27 ` Allen
2024-07-30 18:33 ` [net-next v3 06/15] net: octeon: " Allen Pais
2024-07-30 18:33 ` [net-next v3 07/15] net: thunderx: " Allen Pais
2024-07-30 18:33 ` [net-next v3 08/15] net: chelsio: " Allen Pais
2024-07-30 18:33 ` [net-next v3 09/15] net: sundance: " Allen Pais
2024-07-30 18:33 ` [net-next v3 10/15] net: hinic: " Allen Pais
2024-07-30 18:33 ` [net-next v3 11/15] net: ehea: " Allen Pais
2024-07-30 18:34 ` [net-next v3 12/15] net: ibmvnic: " Allen Pais
2024-07-30 18:34 ` Allen Pais [this message]
2024-07-30 18:34 ` [net-next v3 14/15] net: marvell: " Allen Pais
2024-07-30 20:39 ` Andrew Lunn
2024-07-30 22:22 ` Russell King (Oracle)
2024-07-31 20:46 ` Allen
2024-07-31 20:41 ` Allen
2024-07-30 18:34 ` [net-next v3 15/15] net: mtk-wed: " Allen Pais
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240730183403.4176544-14-allen.lkml@gmail.com \
--to=allen.lkml@gmail.com \
--cc=Mark-MC.Lee@mediatek.com \
--cc=UNGLinuxDriver@microchip.com \
--cc=aneesh.kumar@kernel.org \
--cc=angelogioacchino.delregno@collabora.com \
--cc=borisp@nvidia.com \
--cc=bryan.whitehead@microchip.com \
--cc=cai.huoqing@linux.dev \
--cc=christophe.leroy@csgroup.eu \
--cc=cooldavid@cooldavid.org \
--cc=davem@davemloft.net \
--cc=dougmill@linux.ibm.com \
--cc=edumazet@google.com \
--cc=jes@trained-monkey.org \
--cc=kda@linux-powerpc.org \
--cc=kuba@kernel.org \
--cc=linux-acenic@sunsite.dk \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-net-drivers@amd.com \
--cc=linux-rdma@vger.kernel.org \
--cc=lorenzo@kernel.org \
--cc=louis.peens@corigine.com \
--cc=marcin.s.wojtas@gmail.com \
--cc=matthias.bgg@gmail.com \
--cc=mlindner@marvell.com \
--cc=naveen.n.rao@linux.ibm.com \
--cc=nbd@nbd.name \
--cc=netdev@vger.kernel.org \
--cc=nnac123@linux.ibm.com \
--cc=npiggin@gmail.com \
--cc=pabeni@redhat.com \
--cc=richardcochran@gmail.com \
--cc=sean.wang@mediatek.com \
--cc=stephen@networkplumber.org \
--cc=tlfalcon@linux.ibm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).