From: Davidlohr Bueso <dave@stgolabs.net>
To: linux-scsi@vger.kernel.org
Cc: Tyrel Datwyler <tyreld@linux.ibm.com>,
ejb@linux.ibm.com, martin.petersen@oracle.com,
bigeasy@linutronix.de, dave@stgolabs.net, tglx@linutronix.de,
linuxppc-dev@lists.ozlabs.org
Subject: [PATCH 08/10] scsi/ibmvfc: Replace tasklet with work
Date: Mon, 30 May 2022 16:15:10 -0700 [thread overview]
Message-ID: <20220530231512.9729-9-dave@stgolabs.net> (raw)
In-Reply-To: <20220530231512.9729-1-dave@stgolabs.net>
Tasklets have long been deprecated as being too heavy on the system
by running in irq context - and this is not a performance critical
path. If a higher priority process wants to run, it must wait for
the tasklet to finish before doing so. Use a workqueue instead and
run in task context - albeit with increased concurrency (tasklets
are serialized against themselves, work items are not), but the
handler runs under both the vhost's host_lock + crq.q_lock so it
should be safe.
Cc: Tyrel Datwyler <tyreld@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
---
drivers/scsi/ibmvscsi/ibmvfc.c | 21 ++++++++++++---------
drivers/scsi/ibmvscsi/ibmvfc.h | 3 ++-
2 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index d0eab5700dc5..31b1900489e7 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -891,7 +891,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
ibmvfc_dbg(vhost, "Releasing CRQ\n");
free_irq(vdev->irq, vhost);
- tasklet_kill(&vhost->tasklet);
+ cancel_work_sync(&vhost->work);
do {
if (rc)
msleep(100);
@@ -3689,22 +3689,22 @@ static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
spin_lock_irqsave(vhost->host->host_lock, flags);
vio_disable_interrupts(to_vio_dev(vhost->dev));
- tasklet_schedule(&vhost->tasklet);
+ schedule_work(&vhost->work);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return IRQ_HANDLED;
}
/**
- * ibmvfc_tasklet - Interrupt handler tasklet
- * @data: ibmvfc host struct
+ * ibmvfc_workfn - work handler
+ * @work: work struct embedded in the ibmvfc host
*
* Returns:
* Nothing
**/
-static void ibmvfc_tasklet(void *data)
+static void ibmvfc_workfn(struct work_struct *work)
{
- struct ibmvfc_host *vhost = data;
- struct vio_dev *vdev = to_vio_dev(vhost->dev);
+ struct ibmvfc_host *vhost;
+ struct vio_dev *vdev;
struct ibmvfc_crq *crq;
struct ibmvfc_async_crq *async;
struct ibmvfc_event *evt, *temp;
@@ -3712,6 +3712,9 @@ static void ibmvfc_tasklet(void *data)
int done = 0;
LIST_HEAD(evt_doneq);
+ vhost = container_of(work, struct ibmvfc_host, work);
+ vdev = to_vio_dev(vhost->dev);
+
spin_lock_irqsave(vhost->host->host_lock, flags);
spin_lock(vhost->crq.q_lock);
while (!done) {
@@ -5722,7 +5725,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
retrc = 0;
- tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
+ INIT_WORK(&vhost->work, ibmvfc_workfn);
if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
@@ -5738,7 +5741,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
return retrc;
req_irq_failed:
- tasklet_kill(&vhost->tasklet);
+ cancel_work_sync(&vhost->work);
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
@@ -6213,7 +6216,7 @@ static int ibmvfc_resume(struct device *dev)
spin_lock_irqsave(vhost->host->host_lock, flags);
vio_disable_interrupts(vdev);
- tasklet_schedule(&vhost->tasklet);
+ schedule_work(&vhost->work);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return 0;
}
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 3718406e0988..7eca3622a2fa 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -12,6 +12,7 @@
#include <linux/list.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include <scsi/viosrp.h>
#define IBMVFC_NAME "ibmvfc"
@@ -892,7 +893,7 @@ struct ibmvfc_host {
char partition_name[97];
void (*job_step) (struct ibmvfc_host *);
struct task_struct *work_thread;
- struct tasklet_struct tasklet;
+ struct work_struct work;
struct work_struct rport_add_work_q;
wait_queue_head_t init_wait_q;
wait_queue_head_t work_wait_q;
--
2.36.1
next prev parent reply other threads:[~2022-05-30 23:17 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
[not found] <20220530231512.9729-1-dave@stgolabs.net>
2022-05-30 23:15 ` [PATCH 06/10] scsi/ibmvscsi_tgt: Replace work tasklet with threaded irq Davidlohr Bueso
2022-06-03 11:05 ` Sebastian Andrzej Siewior
2022-05-30 23:15 ` Davidlohr Bueso [this message]
2022-06-09 12:30 ` [PATCH 08/10] scsi/ibmvfc: Replace tasklet with work Sebastian Andrzej Siewior
2022-06-28 15:18 ` Davidlohr Bueso
2022-05-30 23:15 ` [PATCH 09/10] scsi/ibmvscsi: Replace srp " Davidlohr Bueso
2022-06-09 15:02 ` Sebastian Andrzej Siewior
2022-06-09 15:46 ` David Laight
2022-06-14 13:25 ` 'Sebastian Andrzej Siewior'
2022-06-14 13:34 ` David Laight
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220530231512.9729-9-dave@stgolabs.net \
--to=dave@stgolabs.net \
--cc=bigeasy@linutronix.de \
--cc=ejb@linux.ibm.com \
--cc=linux-scsi@vger.kernel.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=martin.petersen@oracle.com \
--cc=tglx@linutronix.de \
--cc=tyreld@linux.ibm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).