* [PATCH 06/10] scsi/ibmvscsi_tgt: Replace work tasklet with threaded irq
[not found] <20220530231512.9729-1-dave@stgolabs.net>
@ 2022-05-30 23:15 ` Davidlohr Bueso
2022-06-03 11:05 ` Sebastian Andrzej Siewior
2022-05-30 23:15 ` [PATCH 08/10] scsi/ibmvfc: Replace tasklet with work Davidlohr Bueso
2022-05-30 23:15 ` [PATCH 09/10] scsi/ibmvscsi: Replace srp " Davidlohr Bueso
2 siblings, 1 reply; 10+ messages in thread
From: Davidlohr Bueso @ 2022-05-30 23:15 UTC (permalink / raw)
To: linux-scsi
Cc: ejb, martin.petersen, bigeasy, Michael Cyr, dave, tglx,
linuxppc-dev
Tasklets have long been deprecated as being too heavy on the system
by running in irq context - and this is not a performance critical
path. If a higher priority process wants to run, it must wait for
the tasklet to finish before doing so. A more suitable equivalent
is to convert to a threaded irq instead and deal with the async
work in task context.
Cc: Michael Cyr <mikecyr@linux.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
---
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 17 +++++++----------
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h | 1 -
2 files changed, 7 insertions(+), 11 deletions(-)
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index eee1a24f7e15..fafadb7158a3 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -2948,9 +2948,8 @@ static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
struct scsi_info *vscsi = data;
vio_disable_interrupts(vscsi->dma_dev);
- tasklet_schedule(&vscsi->work_task);
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
/**
@@ -3317,7 +3316,7 @@ static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
*
* Note: this is an edge triggered interrupt. It can not be shared.
*/
-static void ibmvscsis_handle_crq(unsigned long data)
+static irqreturn_t ibmvscsis_handle_crq(int irq, void *data)
{
struct scsi_info *vscsi = (struct scsi_info *)data;
struct viosrp_crq *crq;
@@ -3340,7 +3339,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n",
vscsi->flags, vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
- return;
+ goto done;
}
rc = vscsi->flags & SCHEDULE_DISCONNECT;
@@ -3417,6 +3416,8 @@ static void ibmvscsis_handle_crq(unsigned long data)
vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
+done:
+ return IRQ_HANDLED;
}
static int ibmvscsis_probe(struct vio_dev *vdev,
@@ -3530,9 +3531,6 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
dev_dbg(&vscsi->dev, "probe hrc %ld, client partition num %d\n",
hrc, vscsi->client_data.partition_number);
- tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
- (unsigned long)vscsi);
-
init_completion(&vscsi->wait_idle);
init_completion(&vscsi->unconfig);
@@ -3544,7 +3542,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
goto unmap_buf;
}
- rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi);
+ rc = request_threaded_irq(vdev->irq, ibmvscsis_interrupt,
+ ibmvscsis_handle_crq, 0, "ibmvscsis", vscsi);
if (rc) {
rc = -EPERM;
dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc);
@@ -3565,7 +3564,6 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
free_buf:
kfree(vscsi->map_buf);
destroy_queue:
- tasklet_kill(&vscsi->work_task);
ibmvscsis_unregister_command_q(vscsi);
ibmvscsis_destroy_command_q(vscsi);
free_timer:
@@ -3602,7 +3600,6 @@ static void ibmvscsis_remove(struct vio_dev *vdev)
dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
DMA_BIDIRECTIONAL);
kfree(vscsi->map_buf);
- tasklet_kill(&vscsi->work_task);
ibmvscsis_destroy_command_q(vscsi);
ibmvscsis_freetimer(vscsi);
ibmvscsis_free_cmds(vscsi);
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index 7ae074e5d7a1..b66c982b8b00 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -295,7 +295,6 @@ struct scsi_info {
struct vio_dev *dma_dev;
struct srp_target target;
struct ibmvscsis_tport tport;
- struct tasklet_struct work_task;
struct work_struct proc_work;
};
--
2.36.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH 08/10] scsi/ibmvfc: Replace tasklet with work
[not found] <20220530231512.9729-1-dave@stgolabs.net>
2022-05-30 23:15 ` [PATCH 06/10] scsi/ibmvscsi_tgt: Replace work tasklet with threaded irq Davidlohr Bueso
@ 2022-05-30 23:15 ` Davidlohr Bueso
2022-06-09 12:30 ` Sebastian Andrzej Siewior
2022-05-30 23:15 ` [PATCH 09/10] scsi/ibmvscsi: Replace srp " Davidlohr Bueso
2 siblings, 1 reply; 10+ messages in thread
From: Davidlohr Bueso @ 2022-05-30 23:15 UTC (permalink / raw)
To: linux-scsi
Cc: Tyrel Datwyler, ejb, martin.petersen, bigeasy, dave, tglx,
linuxppc-dev
Tasklets have long been deprecated as being too heavy on the system
by running in irq context - and this is not a performance critical
path. If a higher priority process wants to run, it must wait for
the tasklet to finish before doing so. Use a workqueue instead and
run in task context - albeit with increased concurrency (tasklets
are safe against themselves), but the handler is done under both the
vhost's host_lock + crq.q_lock so it should be safe.
Cc: Tyrel Datwyler <tyreld@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
---
drivers/scsi/ibmvscsi/ibmvfc.c | 21 ++++++++++++---------
drivers/scsi/ibmvscsi/ibmvfc.h | 3 ++-
2 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index d0eab5700dc5..31b1900489e7 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -891,7 +891,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
ibmvfc_dbg(vhost, "Releasing CRQ\n");
free_irq(vdev->irq, vhost);
- tasklet_kill(&vhost->tasklet);
+ cancel_work_sync(&vhost->work);
do {
if (rc)
msleep(100);
@@ -3689,22 +3689,22 @@ static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
spin_lock_irqsave(vhost->host->host_lock, flags);
vio_disable_interrupts(to_vio_dev(vhost->dev));
- tasklet_schedule(&vhost->tasklet);
+ schedule_work(&vhost->work);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return IRQ_HANDLED;
}
/**
- * ibmvfc_tasklet - Interrupt handler tasklet
+ * ibmvfc_workfn - work handler
* @work: work struct embedded in the ibmvfc host struct
*
* Returns:
* Nothing
**/
-static void ibmvfc_tasklet(void *data)
+static void ibmvfc_workfn(struct work_struct *work)
{
- struct ibmvfc_host *vhost = data;
- struct vio_dev *vdev = to_vio_dev(vhost->dev);
+ struct ibmvfc_host *vhost;
+ struct vio_dev *vdev;
struct ibmvfc_crq *crq;
struct ibmvfc_async_crq *async;
struct ibmvfc_event *evt, *temp;
@@ -3712,6 +3712,9 @@ static void ibmvfc_tasklet(void *data)
int done = 0;
LIST_HEAD(evt_doneq);
+ vhost = container_of(work, struct ibmvfc_host, work);
+ vdev = to_vio_dev(vhost->dev);
+
spin_lock_irqsave(vhost->host->host_lock, flags);
spin_lock(vhost->crq.q_lock);
while (!done) {
@@ -5722,7 +5725,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
retrc = 0;
- tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
+ INIT_WORK(&vhost->work, ibmvfc_workfn);
if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
@@ -5738,7 +5741,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
return retrc;
req_irq_failed:
- tasklet_kill(&vhost->tasklet);
+ cancel_work_sync(&vhost->work);
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
@@ -6213,7 +6216,7 @@ static int ibmvfc_resume(struct device *dev)
spin_lock_irqsave(vhost->host->host_lock, flags);
vio_disable_interrupts(vdev);
- tasklet_schedule(&vhost->tasklet);
+ schedule_work(&vhost->work);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return 0;
}
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 3718406e0988..7eca3622a2fa 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -12,6 +12,7 @@
#include <linux/list.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include <scsi/viosrp.h>
#define IBMVFC_NAME "ibmvfc"
@@ -892,7 +893,7 @@ struct ibmvfc_host {
char partition_name[97];
void (*job_step) (struct ibmvfc_host *);
struct task_struct *work_thread;
- struct tasklet_struct tasklet;
+ struct work_struct work;
struct work_struct rport_add_work_q;
wait_queue_head_t init_wait_q;
wait_queue_head_t work_wait_q;
--
2.36.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH 09/10] scsi/ibmvscsi: Replace srp tasklet with work
[not found] <20220530231512.9729-1-dave@stgolabs.net>
2022-05-30 23:15 ` [PATCH 06/10] scsi/ibmvscsi_tgt: Replace work tasklet with threaded irq Davidlohr Bueso
2022-05-30 23:15 ` [PATCH 08/10] scsi/ibmvfc: Replace tasklet with work Davidlohr Bueso
@ 2022-05-30 23:15 ` Davidlohr Bueso
2022-06-09 15:02 ` Sebastian Andrzej Siewior
2 siblings, 1 reply; 10+ messages in thread
From: Davidlohr Bueso @ 2022-05-30 23:15 UTC (permalink / raw)
To: linux-scsi
Cc: Tyrel Datwyler, ejb, martin.petersen, bigeasy, dave, tglx,
linuxppc-dev
Tasklets have long been deprecated as being too heavy on the system
by running in irq context - and this is not a performance critical
path. If a higher priority process wants to run, it must wait for
the tasklet to finish before doing so.
Process srps asynchronously in process context in a dedicated
single threaded workqueue.
Cc: Tyrel Datwyler <tyreld@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
---
drivers/scsi/ibmvscsi/ibmvscsi.c | 38 ++++++++++++++++++++++----------
drivers/scsi/ibmvscsi/ibmvscsi.h | 3 ++-
2 files changed, 28 insertions(+), 13 deletions(-)
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 63f32f843e75..37cbea8bb0af 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -86,6 +86,8 @@ static DEFINE_SPINLOCK(ibmvscsi_driver_lock);
static struct scsi_transport_template *ibmvscsi_transport_template;
+static struct workqueue_struct *ibmvscsi_wq;
+
#define IBMVSCSI_VERSION "1.5.9"
MODULE_DESCRIPTION("IBM Virtual SCSI");
@@ -117,7 +119,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
* @irq: number of irq to handle, not used
* @dev_instance: ibmvscsi_host_data of host that received interrupt
*
- * Disables interrupts and schedules srp_task
+ * Disables interrupts and schedules srp_work
* Always returns IRQ_HANDLED
*/
static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
@@ -125,7 +127,7 @@ static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)dev_instance;
vio_disable_interrupts(to_vio_dev(hostdata->dev));
- tasklet_schedule(&hostdata->srp_task);
+ queue_work(ibmvscsi_wq, &hostdata->srp_work);
return IRQ_HANDLED;
}
@@ -145,7 +147,7 @@ static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
long rc = 0;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
free_irq(vdev->irq, (void *)hostdata);
- tasklet_kill(&hostdata->srp_task);
+ cancel_work_sync(&hostdata->srp_work);
do {
if (rc)
msleep(100);
@@ -206,16 +208,19 @@ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
}
/**
- * ibmvscsi_task: - Process srps asynchronously
+ * ibmvscsi_workfn - Process srps asynchronously
* @work: srp_work struct embedded in the ibmvscsi_host_data of host
*/
-static void ibmvscsi_task(void *data)
+static void ibmvscsi_workfn(struct work_struct *work)
{
- struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
- struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+ struct ibmvscsi_host_data *hostdata;
+ struct vio_dev *vdev;
struct viosrp_crq *crq;
int done = 0;
+ hostdata = container_of(work, struct ibmvscsi_host_data, srp_work);
+ vdev = to_vio_dev(hostdata->dev);
+
while (!done) {
/* Pull all the valid messages off the CRQ */
while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
@@ -367,8 +372,7 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
queue->cur = 0;
spin_lock_init(&queue->lock);
- tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
- (unsigned long)hostdata);
+ INIT_WORK(&hostdata->srp_work, ibmvscsi_workfn);
if (request_irq(vdev->irq,
ibmvscsi_handle_event,
@@ -387,7 +391,7 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
return retrc;
req_irq_failed:
- tasklet_kill(&hostdata->srp_task);
+ cancel_work_sync(&hostdata->srp_work);
rc = 0;
do {
if (rc)
@@ -2371,7 +2375,7 @@ static int ibmvscsi_resume(struct device *dev)
{
struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
vio_disable_interrupts(to_vio_dev(hostdata->dev));
- tasklet_schedule(&hostdata->srp_task);
+ queue_work(ibmvscsi_wq, &hostdata->srp_work);
return 0;
}
@@ -2418,15 +2422,25 @@ static int __init ibmvscsi_module_init(void)
if (!ibmvscsi_transport_template)
return -ENOMEM;
+ ibmvscsi_wq = alloc_ordered_workqueue("ibmvscsi_wq", 0);
+ if (!ibmvscsi_wq) {
+ srp_release_transport(ibmvscsi_transport_template);
+ return -ENOMEM;
+ }
+
ret = vio_register_driver(&ibmvscsi_driver);
- if (ret)
+ if (ret) {
+ destroy_workqueue(ibmvscsi_wq);
srp_release_transport(ibmvscsi_transport_template);
+ }
+
return ret;
}
static void __exit ibmvscsi_module_exit(void)
{
vio_unregister_driver(&ibmvscsi_driver);
+ destroy_workqueue(ibmvscsi_wq);
srp_release_transport(ibmvscsi_transport_template);
}
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index e60916ef7a49..f7c52744a206 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/list.h>
#include <linux/completion.h>
+#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <scsi/viosrp.h>
@@ -90,7 +91,7 @@ struct ibmvscsi_host_data {
struct device *dev;
struct event_pool pool;
struct crq_queue queue;
- struct tasklet_struct srp_task;
+ struct work_struct srp_work;
struct list_head sent;
struct Scsi_Host *host;
struct task_struct *work_thread;
--
2.36.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [PATCH 06/10] scsi/ibmvscsi_tgt: Replace work tasklet with threaded irq
2022-05-30 23:15 ` [PATCH 06/10] scsi/ibmvscsi_tgt: Replace work tasklet with threaded irq Davidlohr Bueso
@ 2022-06-03 11:05 ` Sebastian Andrzej Siewior
0 siblings, 0 replies; 10+ messages in thread
From: Sebastian Andrzej Siewior @ 2022-06-03 11:05 UTC (permalink / raw)
To: Davidlohr Bueso
Cc: ejb, linux-scsi, martin.petersen, Michael Cyr, tglx, linuxppc-dev
On 2022-05-30 16:15:08 [-0700], Davidlohr Bueso wrote:
> diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
> index eee1a24f7e15..fafadb7158a3 100644
> --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
> +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
> @@ -2948,9 +2948,8 @@ static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
> struct scsi_info *vscsi = data;
>
> vio_disable_interrupts(vscsi->dma_dev);
> - tasklet_schedule(&vscsi->work_task);
looks good.
> - return IRQ_HANDLED;
> + return IRQ_WAKE_THREAD;
> }
>
> /**
> @@ -3340,7 +3339,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
> dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n",
> vscsi->flags, vscsi->state);
> spin_unlock_bh(&vscsi->intr_lock);
So if you move it away from from tasklet you can replace the spin_lock_bh()
with spin_lock() since BH does not matter anymore. Except if there is a
timer because it matters for a timer_list timer which is invoked in
softirq context. This isn't the case except that there is a hrtimer
invoking ibmvscsis_service_wait_q(). This is bad because a hrtimer
is invoked in hard-irq context, so a BH lock must not be acquired.
lockdep would scream here. You could use HRTIMER_MODE_REL_SOFT which
would make it a BH timer. Then you could keep the BH locking but
actually you want to get rid of it ;)
> - return;
> + goto done;
> }
>
> rc = vscsi->flags & SCHEDULE_DISCONNECT;
Sebastian
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH 08/10] scsi/ibmvfc: Replace tasklet with work
2022-05-30 23:15 ` [PATCH 08/10] scsi/ibmvfc: Replace tasklet with work Davidlohr Bueso
@ 2022-06-09 12:30 ` Sebastian Andrzej Siewior
2022-06-28 15:18 ` Davidlohr Bueso
0 siblings, 1 reply; 10+ messages in thread
From: Sebastian Andrzej Siewior @ 2022-06-09 12:30 UTC (permalink / raw)
To: Davidlohr Bueso
Cc: Tyrel Datwyler, ejb, linux-scsi, martin.petersen, tglx,
linuxppc-dev
On 2022-05-30 16:15:10 [-0700], Davidlohr Bueso wrote:
> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
> index d0eab5700dc5..31b1900489e7 100644
> --- a/drivers/scsi/ibmvscsi/ibmvfc.c
> +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
> @@ -891,7 +891,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
>
> ibmvfc_dbg(vhost, "Releasing CRQ\n");
> free_irq(vdev->irq, vhost);
> - tasklet_kill(&vhost->tasklet);
> + cancel_work_sync(&vhost->work);
s/ {8}/\t/
is there a reason not to use threaded interrupts? The workqueue _might_
migrate to another CPU. The locking ensures that nothing bad happens but
ibmvfc_tasklet() has this piece:
| spin_lock_irqsave(vhost->host->host_lock, flags);
…
| while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
| ibmvfc_handle_async(async, vhost);
| async->valid = 0;
| wmb();
| }
…
| vio_enable_interrupts(vdev);
potentially enables interrupts which fires right away.
| if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
| vio_disable_interrupts(vdev);
disables it again.
| }
|
| spin_unlock(vhost->crq.q_lock);
| spin_unlock_irqrestore(vhost->host->host_lock, flags);
If the worker runs on another CPU then the CPU servicing the interrupt
will be blocked on the lock which is not nice.
My guess is that you could enable interrupts right before unlocking but
is a different story.
> do {
> if (rc)
> msleep(100);
Sebastian
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH 09/10] scsi/ibmvscsi: Replace srp tasklet with work
2022-05-30 23:15 ` [PATCH 09/10] scsi/ibmvscsi: Replace srp " Davidlohr Bueso
@ 2022-06-09 15:02 ` Sebastian Andrzej Siewior
2022-06-09 15:46 ` David Laight
0 siblings, 1 reply; 10+ messages in thread
From: Sebastian Andrzej Siewior @ 2022-06-09 15:02 UTC (permalink / raw)
To: Davidlohr Bueso
Cc: Tyrel Datwyler, ejb, linux-scsi, martin.petersen, tglx,
linuxppc-dev
On 2022-05-30 16:15:11 [-0700], Davidlohr Bueso wrote:
> Tasklets have long been deprecated as being too heavy on the system
> by running in irq context - and this is not a performance critical
> path. If a higher priority process wants to run, it must wait for
> the tasklet to finish before doing so.
>
> Process srps asynchronously in process context in a dedicated
> single threaded workqueue.
I would suggest threaded interrupts instead. The pattern here is the
same as in the previous driver except here is less locking.
Sebastian
^ permalink raw reply [flat|nested] 10+ messages in thread
* RE: [PATCH 09/10] scsi/ibmvscsi: Replace srp tasklet with work
2022-06-09 15:02 ` Sebastian Andrzej Siewior
@ 2022-06-09 15:46 ` David Laight
2022-06-14 13:25 ` 'Sebastian Andrzej Siewior'
0 siblings, 1 reply; 10+ messages in thread
From: David Laight @ 2022-06-09 15:46 UTC (permalink / raw)
To: 'Sebastian Andrzej Siewior', Davidlohr Bueso
Cc: Tyrel Datwyler, ejb@linux.ibm.com, linux-scsi@vger.kernel.org,
martin.petersen@oracle.com, tglx@linutronix.de,
linuxppc-dev@lists.ozlabs.org
From: Sebastian Andrzej Siewior
> Sent: 09 June 2022 16:03
>
> On 2022-05-30 16:15:11 [-0700], Davidlohr Bueso wrote:
> > Tasklets have long been deprecated as being too heavy on the system
> > by running in irq context - and this is not a performance critical
> > path. If a higher priority process wants to run, it must wait for
> > the tasklet to finish before doing so.
> >
> > Process srps asynchronously in process context in a dedicated
> > single threaded workqueue.
>
> I would suggest threaded interrupts instead. The pattern here is the
> same as in the previous driver except here is less locking.
How long do these actions run for, and what is waiting for
them to finish?
These changes seem to drop the priority from above that of the
highest priority RT process down to that of a default priority
user process.
There is no real guarantee that the latter will run 'any time soon'.
Consider some workloads I'm setting up where most of the cpu are
likely to spend 90%+ of the time running processes under the RT
scheduler that are processing audio.
It is quite likely that a non-RT thread (especially one bound
to a specific cpu) won't run for several milliseconds.
(We have to go through 'hoops' to avoid dropping ethernet frames.)
I'd have thought that some of these kernel threads really
need to run at a 'middling' RT priority.
David
-
Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
Registration No: 1397386 (Wales)
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH 09/10] scsi/ibmvscsi: Replace srp tasklet with work
2022-06-09 15:46 ` David Laight
@ 2022-06-14 13:25 ` 'Sebastian Andrzej Siewior'
2022-06-14 13:34 ` David Laight
0 siblings, 1 reply; 10+ messages in thread
From: 'Sebastian Andrzej Siewior' @ 2022-06-14 13:25 UTC (permalink / raw)
To: David Laight
Cc: Tyrel Datwyler, Davidlohr Bueso, linux-scsi@vger.kernel.org,
martin.petersen@oracle.com, ejb@linux.ibm.com, tglx@linutronix.de,
linuxppc-dev@lists.ozlabs.org
On 2022-06-09 15:46:04 [+0000], David Laight wrote:
> From: Sebastian Andrzej Siewior
> > Sent: 09 June 2022 16:03
> >
> > On 2022-05-30 16:15:11 [-0700], Davidlohr Bueso wrote:
> > > Tasklets have long been deprecated as being too heavy on the system
> > > by running in irq context - and this is not a performance critical
> > > path. If a higher priority process wants to run, it must wait for
> > > the tasklet to finish before doing so.
> > >
> > > Process srps asynchronously in process context in a dedicated
> > > single threaded workqueue.
> >
> > I would suggest threaded interrupts instead. The pattern here is the
> > same as in the previous driver except here is less locking.
>
> How long do these actions runs for, and what is waiting for
> them to finish?
That is something that one with hardware and workload can answer.
> These changes seem to drop the priority from above that of the
> highest priority RT process down to that of a default priority
> user process.
> There is no real guarantee that the latter will run 'any time soon'.
Not sure I can follow. Using threaded interrupts will run at FIFO-50 by
default. Workqueue however is SCHED_OTHER. But then it is not bound to
any CPU so it will run on an available CPU.
> Consider some workloads I'm setting up where most of the cpu are
> likely to spend 90%+ of the time running processes under the RT
> scheduler that are processing audio.
>
> It is quite likely that a non-RT thread (especially one bound
> to a specific cpu) won't run for several milliseconds.
> (We have to go through 'hoops' to avoid dropping ethernet frames.)
>
> I'd have thought that some of these kernel threads really
> need to run at a 'middling' RT priority.
The threaded interrupts do this by default. If you run your own RT
threads you need to decide if they are more or less important than the
interrupts.
> David
Sebastian
^ permalink raw reply [flat|nested] 10+ messages in thread
* RE: [PATCH 09/10] scsi/ibmvscsi: Replace srp tasklet with work
2022-06-14 13:25 ` 'Sebastian Andrzej Siewior'
@ 2022-06-14 13:34 ` David Laight
0 siblings, 0 replies; 10+ messages in thread
From: David Laight @ 2022-06-14 13:34 UTC (permalink / raw)
To: 'Sebastian Andrzej Siewior'
Cc: Tyrel Datwyler, Davidlohr Bueso, linux-scsi@vger.kernel.org,
martin.petersen@oracle.com, ejb@linux.ibm.com, tglx@linutronix.de,
linuxppc-dev@lists.ozlabs.org
From: Sebastian Andrzej Siewior
> Sent: 14 June 2022 14:26
...
> > These changes seem to drop the priority from above that of the
> > highest priority RT process down to that of a default priority
> > user process.
> > There is no real guarantee that the latter will run 'any time soon'.
>
> Not sure I can follow. Using threaded interrupts will run at FIFO-50 by
> default. Workqueue however is SCHED_OTHER. But then it is not bound to
> any CPU so it will run on an available CPU.
Ok, I'd only looked at normal workqueues, softints and napi.
They are all SCHED_OTHER.
Unbound FIFO is moderately ok - they are sticky but can move.
The only problem is that they won't move if a process is
spinning in kernel on the cpu they last run on.
David
-
Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
Registration No: 1397386 (Wales)
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH 08/10] scsi/ibmvfc: Replace tasklet with work
2022-06-09 12:30 ` Sebastian Andrzej Siewior
@ 2022-06-28 15:18 ` Davidlohr Bueso
0 siblings, 0 replies; 10+ messages in thread
From: Davidlohr Bueso @ 2022-06-28 15:18 UTC (permalink / raw)
To: Sebastian Andrzej Siewior
Cc: Tyrel Datwyler, ejb, linux-scsi, martin.petersen, tglx,
linuxppc-dev
On Thu, 09 Jun 2022, Sebastian Andrzej Siewior wrote:
>On 2022-05-30 16:15:10 [-0700], Davidlohr Bueso wrote:
>> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
>> index d0eab5700dc5..31b1900489e7 100644
>> --- a/drivers/scsi/ibmvscsi/ibmvfc.c
>> +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
>> @@ -891,7 +891,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
>>
>> ibmvfc_dbg(vhost, "Releasing CRQ\n");
>> free_irq(vdev->irq, vhost);
>> - tasklet_kill(&vhost->tasklet);
>> + cancel_work_sync(&vhost->work);
>s/ {8}/\t/
>
>is there a reason not to use threaded interrupts?
I went with a workqueue here because the resume from suspend also schedules
async processing, so threaded irqs didn't seem like a good fit. This is also
similar to patch 2 (but in that case I overlooked the resume caller which you
pointed out).
Thanks,
Davidlohr
^ permalink raw reply [flat|nested] 10+ messages in thread
end of thread, other threads:[~2022-06-28 15:33 UTC | newest]
Thread overview: 10+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
[not found] <20220530231512.9729-1-dave@stgolabs.net>
2022-05-30 23:15 ` [PATCH 06/10] scsi/ibmvscsi_tgt: Replace work tasklet with threaded irq Davidlohr Bueso
2022-06-03 11:05 ` Sebastian Andrzej Siewior
2022-05-30 23:15 ` [PATCH 08/10] scsi/ibmvfc: Replace tasklet with work Davidlohr Bueso
2022-06-09 12:30 ` Sebastian Andrzej Siewior
2022-06-28 15:18 ` Davidlohr Bueso
2022-05-30 23:15 ` [PATCH 09/10] scsi/ibmvscsi: Replace srp " Davidlohr Bueso
2022-06-09 15:02 ` Sebastian Andrzej Siewior
2022-06-09 15:46 ` David Laight
2022-06-14 13:25 ` 'Sebastian Andrzej Siewior'
2022-06-14 13:34 ` David Laight
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).