From: Allen <allen.lkml@gmail.com>
To: Paolo Abeni <pabeni@redhat.com>
Cc: kuba@kernel.org, Guo-Fu Tseng <cooldavid@cooldavid.org>,
	 "David S. Miller" <davem@davemloft.net>,
	Eric Dumazet <edumazet@google.com>,
	jes@trained-monkey.org,  kda@linux-powerpc.org,
	cai.huoqing@linux.dev, dougmill@linux.ibm.com,
	 npiggin@gmail.com, christophe.leroy@csgroup.eu,
	aneesh.kumar@kernel.org,  naveen.n.rao@linux.ibm.com,
	nnac123@linux.ibm.com, tlfalcon@linux.ibm.com,
	 marcin.s.wojtas@gmail.com, mlindner@marvell.com,
	stephen@networkplumber.org,  nbd@nbd.name,
	sean.wang@mediatek.com, Mark-MC.Lee@mediatek.com,
	 lorenzo@kernel.org, matthias.bgg@gmail.com,
	 angelogioacchino.delregno@collabora.com, borisp@nvidia.com,
	 bryan.whitehead@microchip.com, UNGLinuxDriver@microchip.com,
	 louis.peens@corigine.com, richardcochran@gmail.com,
	 linux-rdma@vger.kernel.org, linux-kernel@vger.kernel.org,
	 linux-acenic@sunsite.dk, linux-net-drivers@amd.com,
	netdev@vger.kernel.org
Subject: Re: [PATCH 13/15] net: jme: Convert tasklet API to new bottom half workqueue mechanism
Date: Mon, 1 Jul 2024 03:13:40 -0700
Message-ID: <CAOMdWSKKyqaJB2Psgcy9piUv3LTDBHhbo_g404fSmqQrVSyr7Q@mail.gmail.com>
In-Reply-To: <ba3b8f5907c071e40be68758f2a11662008713e8.camel@redhat.com>

> > Migrate tasklet APIs to the new bottom half workqueue mechanism. It
> > replaces all occurrences of tasklet usage with the appropriate workqueue
> > APIs throughout the jme driver. This transition ensures compatibility
> > with the latest design and enhances performance.
> >
> > Signed-off-by: Allen Pais <allen.lkml@gmail.com>
> > ---
> >  drivers/net/ethernet/jme.c | 72 +++++++++++++++++++-------------------
> >  drivers/net/ethernet/jme.h |  8 ++---
> >  2 files changed, 40 insertions(+), 40 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
> > index b06e24562973..b1a92b851b3b 100644
> > --- a/drivers/net/ethernet/jme.c
> > +++ b/drivers/net/ethernet/jme.c
> > @@ -1141,7 +1141,7 @@ jme_dynamic_pcc(struct jme_adapter *jme)
> >
> >       if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
> >               if (dpi->attempt < dpi->cur)
> > -                     tasklet_schedule(&jme->rxclean_task);
> > +                     queue_work(system_bh_wq, &jme->rxclean_bh_work);
> >               jme_set_rx_pcc(jme, dpi->attempt);
> >               dpi->cur = dpi->attempt;
> >               dpi->cnt = 0;
> > @@ -1182,9 +1182,9 @@ jme_shutdown_nic(struct jme_adapter *jme)
> >  }
> >
> >  static void
> > -jme_pcc_tasklet(struct tasklet_struct *t)
> > +jme_pcc_bh_work(struct work_struct *work)
> >  {
> > -     struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
> > +     struct jme_adapter *jme = from_work(jme, work, pcc_bh_work);
> >       struct net_device *netdev = jme->dev;
> >
> >       if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
> > @@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
> >               jme_stop_shutdown_timer(jme);
> >
> >       jme_stop_pcc_timer(jme);
> > -     tasklet_disable(&jme->txclean_task);
> > -     tasklet_disable(&jme->rxclean_task);
> > -     tasklet_disable(&jme->rxempty_task);
> > +     disable_work_sync(&jme->txclean_bh_work);
> > +     disable_work_sync(&jme->rxclean_bh_work);
> > +     disable_work_sync(&jme->rxempty_bh_work);
> >
> >       if (netif_carrier_ok(netdev)) {
> >               jme_disable_rx_engine(jme);
> > @@ -1304,7 +1304,7 @@ static void jme_link_change_work(struct work_struct *work)
> >               rc = jme_setup_rx_resources(jme);
> >               if (rc) {
> >                       pr_err("Allocating resources for RX error, Device STOPPED!\n");
> > -                     goto out_enable_tasklet;
> > +                     goto out_enable_bh_work;
> >               }
> >
> >               rc = jme_setup_tx_resources(jme);
> > @@ -1326,22 +1326,22 @@ static void jme_link_change_work(struct work_struct *work)
> >               jme_start_shutdown_timer(jme);
> >       }
> >
> > -     goto out_enable_tasklet;
> > +     goto out_enable_bh_work;
> >
> >  err_out_free_rx_resources:
> >       jme_free_rx_resources(jme);
> > -out_enable_tasklet:
> > -     tasklet_enable(&jme->txclean_task);
> > -     tasklet_enable(&jme->rxclean_task);
> > -     tasklet_enable(&jme->rxempty_task);
> > +out_enable_bh_work:
> > +     enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
> > +     enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
> > +     enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
>
> This will unconditionally schedule the rxempty_bh_work and is AFAICS a
> different behavior WRT prior to this patch.
>
> In turn the rxempty_bh_work() will emit (almost unconditionally) the
> 'RX Queue Full!' message, so the change should be visible to the user.
>
> I think you should queue the work only if it was queued at cancel time.
> You likely need additional status to do that.
>

 Thank you for taking the time to review. Now that it's been a week, I was
preparing to send out version 3. Before I do that, I want to make sure
that the approach below is acceptable.

diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b06e24562973..b3fc2e5c379f 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1141,7 +1141,7 @@ jme_dynamic_pcc(struct jme_adapter *jme)

        if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
                if (dpi->attempt < dpi->cur)
-                       tasklet_schedule(&jme->rxclean_task);
+                       queue_work(system_bh_wq, &jme->rxclean_bh_work);
                jme_set_rx_pcc(jme, dpi->attempt);
                dpi->cur = dpi->attempt;
                dpi->cnt = 0;
@@ -1182,9 +1182,9 @@ jme_shutdown_nic(struct jme_adapter *jme)
 }

 static void
-jme_pcc_tasklet(struct tasklet_struct *t)
+jme_pcc_bh_work(struct work_struct *work)
 {
-       struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
+       struct jme_adapter *jme = from_work(jme, work, pcc_bh_work);
        struct net_device *netdev = jme->dev;

        if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
@@ -1282,9 +1282,9 @@ static void jme_link_change_work(struct work_struct *work)
                jme_stop_shutdown_timer(jme);

        jme_stop_pcc_timer(jme);
-       tasklet_disable(&jme->txclean_task);
-       tasklet_disable(&jme->rxclean_task);
-       tasklet_disable(&jme->rxempty_task);
+       disable_work_sync(&jme->txclean_bh_work);
+       disable_work_sync(&jme->rxclean_bh_work);
+       disable_work_sync(&jme->rxempty_bh_work);

        if (netif_carrier_ok(netdev)) {
                jme_disable_rx_engine(jme);
@@ -1304,7 +1304,7 @@ static void jme_link_change_work(struct work_struct *work)
                rc = jme_setup_rx_resources(jme);
                if (rc) {
                        pr_err("Allocating resources for RX error,
Device STOPPED!\n");
-                       goto out_enable_tasklet;
+                       goto out_enable_bh_work;
                }

                rc = jme_setup_tx_resources(jme);
@@ -1326,22 +1326,23 @@ static void jme_link_change_work(struct work_struct *work)
                jme_start_shutdown_timer(jme);
        }

-       goto out_enable_tasklet;
+       goto out_enable_bh_work;

 err_out_free_rx_resources:
        jme_free_rx_resources(jme);
-out_enable_tasklet:
-       tasklet_enable(&jme->txclean_task);
-       tasklet_enable(&jme->rxclean_task);
-       tasklet_enable(&jme->rxempty_task);
+out_enable_bh_work:
+       enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
+       enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
+       if (jme->rxempty_bh_work_queued)
+               enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);
 out:
        atomic_inc(&jme->link_changing);
 }

 static void
-jme_rx_clean_tasklet(struct tasklet_struct *t)
+jme_rx_clean_bh_work(struct work_struct *work)
 {
-       struct jme_adapter *jme = from_tasklet(jme, t, rxclean_task);
+       struct jme_adapter *jme = from_work(jme, work, rxclean_bh_work);
        struct dynpcc_info *dpi = &(jme->dpi);

        jme_process_receive(jme, jme->rx_ring_size);
@@ -1374,9 +1375,9 @@ jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
 }

 static void
-jme_rx_empty_tasklet(struct tasklet_struct *t)
+jme_rx_empty_bh_work(struct work_struct *work)
 {
-       struct jme_adapter *jme = from_tasklet(jme, t, rxempty_task);
+       struct jme_adapter *jme = from_work(jme, work, rxempty_bh_work);

        if (unlikely(atomic_read(&jme->link_changing) != 1))
                return;
@@ -1386,7 +1387,7 @@ jme_rx_empty_tasklet(struct tasklet_struct *t)

        netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");

-       jme_rx_clean_tasklet(&jme->rxclean_task);
+       jme_rx_clean_bh_work(&jme->rxclean_bh_work);

        while (atomic_read(&jme->rx_empty) > 0) {
                atomic_dec(&jme->rx_empty);
@@ -1410,9 +1411,9 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)

 }

-static void jme_tx_clean_tasklet(struct tasklet_struct *t)
+static void jme_tx_clean_bh_work(struct work_struct *work)
 {
-       struct jme_adapter *jme = from_tasklet(jme, t, txclean_task);
+       struct jme_adapter *jme = from_work(jme, work, txclean_bh_work);
        struct jme_ring *txring = &(jme->txring[0]);
        struct txdesc *txdesc = txring->desc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
@@ -1510,12 +1511,12 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)

        if (intrstat & INTR_TMINTR) {
                jwrite32(jme, JME_IEVE, INTR_TMINTR);
-               tasklet_schedule(&jme->pcc_task);
+               queue_work(system_bh_wq, &jme->pcc_bh_work);
        }

        if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
                jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
-               tasklet_schedule(&jme->txclean_task);
+               queue_work(system_bh_wq, &jme->txclean_bh_work);
        }

        if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
@@ -1538,9 +1539,9 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
        } else {
                if (intrstat & INTR_RX0EMP) {
                        atomic_inc(&jme->rx_empty);
-                       tasklet_hi_schedule(&jme->rxempty_task);
+                       queue_work(system_bh_highpri_wq, &jme->rxempty_bh_work);
                } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
-                       tasklet_hi_schedule(&jme->rxclean_task);
+                       queue_work(system_bh_highpri_wq, &jme->rxclean_bh_work);
                }
        }

@@ -1826,9 +1827,9 @@ jme_open(struct net_device *netdev)
        jme_clear_pm_disable_wol(jme);
        JME_NAPI_ENABLE(jme);

-       tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
-       tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
-       tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
+       INIT_WORK(&jme->txclean_bh_work, jme_tx_clean_bh_work);
+       INIT_WORK(&jme->rxclean_bh_work, jme_rx_clean_bh_work);
+       INIT_WORK(&jme->rxempty_bh_work, jme_rx_empty_bh_work);

        rc = jme_request_irq(jme);
        if (rc)
@@ -1914,9 +1915,10 @@ jme_close(struct net_device *netdev)
        JME_NAPI_DISABLE(jme);

        cancel_work_sync(&jme->linkch_task);
-       tasklet_kill(&jme->txclean_task);
-       tasklet_kill(&jme->rxclean_task);
-       tasklet_kill(&jme->rxempty_task);
+       cancel_work_sync(&jme->txclean_bh_work);
+       cancel_work_sync(&jme->rxclean_bh_work);
+       jme->rxempty_bh_work_queued = false;
+       cancel_work_sync(&jme->rxempty_bh_work);

        jme_disable_rx_engine(jme);
        jme_disable_tx_engine(jme);
@@ -3020,7 +3022,7 @@ jme_init_one(struct pci_dev *pdev,
        atomic_set(&jme->tx_cleaning, 1);
        atomic_set(&jme->rx_empty, 1);

-       tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
+       INIT_WORK(&jme->pcc_bh_work, jme_pcc_bh_work);
        INIT_WORK(&jme->linkch_task, jme_link_change_work);
        jme->dpi.cur = PCC_P1;

@@ -3180,9 +3182,9 @@ jme_suspend(struct device *dev)
        netif_stop_queue(netdev);
        jme_stop_irq(jme);

-       tasklet_disable(&jme->txclean_task);
-       tasklet_disable(&jme->rxclean_task);
-       tasklet_disable(&jme->rxempty_task);
+       disable_work_sync(&jme->txclean_bh_work);
+       disable_work_sync(&jme->rxclean_bh_work);
+       disable_work_sync(&jme->rxempty_bh_work);

        if (netif_carrier_ok(netdev)) {
                if (test_bit(JME_FLAG_POLL, &jme->flags))
@@ -3198,9 +3200,10 @@ jme_suspend(struct device *dev)
                jme->phylink = 0;
        }

-       tasklet_enable(&jme->txclean_task);
-       tasklet_enable(&jme->rxclean_task);
-       tasklet_enable(&jme->rxempty_task);
+       enable_and_queue_work(system_bh_wq, &jme->txclean_bh_work);
+       enable_and_queue_work(system_bh_wq, &jme->rxclean_bh_work);
+       jme->rxempty_bh_work_queued = true;
+       enable_and_queue_work(system_bh_wq, &jme->rxempty_bh_work);

        jme_powersave_phy(jme);

diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 860494ff3714..44aaf7625dc3 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -406,11 +406,12 @@ struct jme_adapter {
        spinlock_t              phy_lock;
        spinlock_t              macaddr_lock;
        spinlock_t              rxmcs_lock;
-       struct tasklet_struct   rxempty_task;
-       struct tasklet_struct   rxclean_task;
-       struct tasklet_struct   txclean_task;
+       struct work_struct      rxempty_bh_work;
+       struct work_struct      rxclean_bh_work;
+       struct work_struct      txclean_bh_work;
+       bool                    rxempty_bh_work_queued;
        struct work_struct      linkch_task;
-       struct tasklet_struct   pcc_task;
+       struct work_struct      pcc_bh_work;
        unsigned long           flags;
        u32                     reg_txcs;
        u32                     reg_txpfc;



  Do we need a flag for rxclean and txclean too?
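
 As a rough, untested sketch of what I had in mind for setting the flag
(assuming work_pending() is an acceptable way to snapshot the state just
before disabling; the same pattern would extend to rxclean/txclean with
two more bools if needed):

	/* Remember whether the rxempty work was still queued, so that
	 * out_enable_bh_work re-queues it only in that case.
	 */
	jme->rxempty_bh_work_queued = work_pending(&jme->rxempty_bh_work);
	disable_work_sync(&jme->txclean_bh_work);
	disable_work_sync(&jme->rxclean_bh_work);
	disable_work_sync(&jme->rxempty_bh_work);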

Thanks,
Allen

> Thanks,
>
> Paolo
>


-- 
       - Allen
