From: Kyle Mahlkuch <kmahlkuc@linux.ibm.com>
To: linux-scsi@vger.kernel.org, linux-kernel@vger.kernel.org,
paul.ely@broadcom.com
Cc: thinhtr@linux.ibm.com
Subject: [PATCH 3/3] scsi: fc_transport: vport and rport cleanup synchronization
Date: Thu, 9 Apr 2026 10:12:35 -0500
Message-ID: <e16406ec-d4d6-4783-b79c-5c00263c133c@linux.ibm.com>
Improve synchronization and cleanup logic in fc_remove_host() and
fc_rport_final_delete() to prevent use-after-free conditions during
host removal.
Vport cleanup:
- Mark all vports with FC_VPORT_DELETING under the host lock
- Cancel all vport work synchronously before removing from the list
- Delete each vport synchronously with fc_vport_terminate()

Rport cleanup, applied to both rports and rport_bindings:
- Mark all rports with FC_PORTSTATE_DELETED under the host lock
- Cancel all timers and work synchronously before removing from the list
- Call fc_rport_final_delete() synchronously instead of queuing it

fc_rport_final_delete():
- Call cancel_delayed_work_sync() for any outstanding delayed work
- Clear FC_RPORT_DEVLOSS_PENDING under the host lock
- Ensure all pending work completes before destruction
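
For illustration only, not part of the patch: a minimal userspace
analogue of the two-phase teardown applied above, with a pthread mutex
standing in for the host lock and a thread standing in for a work
item. All names below are hypothetical.

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct port {
          pthread_mutex_t *lock;  /* host_lock stand-in */
          bool deleted;           /* FC_PORTSTATE_DELETED stand-in */
          pthread_t worker;       /* pending work stand-in */
  };

  static void *work_fn(void *arg)
  {
          struct port *p = arg;

          /* In-flight work checks the deleted flag under the lock. */
          pthread_mutex_lock(p->lock);
          if (!p->deleted)
                  puts("work ran before teardown");
          pthread_mutex_unlock(p->lock);
          return NULL;
  }

  int main(void)
  {
          static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
          struct port *p = malloc(sizeof(*p));

          p->lock = &lock;
          p->deleted = false;
          pthread_create(&p->worker, NULL, work_fn, p);

          /* Phase 1: mark as deleted under the lock. */
          pthread_mutex_lock(&lock);
          p->deleted = true;
          pthread_mutex_unlock(&lock);

          /*
           * Phase 2: wait for the work synchronously (the
           * cancel_work_sync() analogue), then free. The worker can
           * no longer dereference freed memory.
           */
          pthread_join(p->worker, NULL);
          free(p);
          return 0;
  }
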
Signed-off-by: Thinh Tran <thinhtr@linux.ibm.com>
Signed-off-by: Kyle Mahlkuch <kmahlkuc@linux.ibm.com>
---
drivers/scsi/scsi_transport_fc.c | 85 ++++++++++++++++++++++++--------
1 file changed, 64 insertions(+), 21 deletions(-)
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 123b22b52640..0adb9330befc 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -39,6 +39,7 @@ static void fc_li_stats_update(u16 event_type,
static void fc_delivery_stats_update(u32 reason_code,
struct fc_fpin_stats *stats);
static void fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats);
+static void fc_rport_final_delete(struct work_struct *work);
/*
* Module Parameters
@@ -2883,31 +2884,71 @@ fc_remove_host(struct Scsi_Host *shost)
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
unsigned long flags;
- spin_lock_irqsave(shost->host_lock, flags);
-
/* Remove any vports */
+ /* First pass: mark all vports FC_VPORT_DELETING under the lock */
+ spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
vport->flags |= FC_VPORT_DELETING;
- fc_queue_work(shost, &vport->vport_delete_work);
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /*
+ * Cancel all vport work synchronously BEFORE removing from the list.
+ * This prevents a use-after-free if a timer fires during teardown.
+ */
+ list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
+ /* Cancel any pending work/timers */
+ cancel_work_sync(&vport->vport_delete_work);
+ /* Now safe to do synchronous deletion */
+ fc_vport_terminate(vport);
}
/* Remove any remote ports */
+ /* First pass: mark rports and rport_bindings FC_PORTSTATE_DELETED */
+ spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry_safe(rport, next_rport,
&fc_host->rports, peers) {
- list_del(&rport->peers);
rport->port_state = FC_PORTSTATE_DELETED;
- fc_queue_work(shost, &rport->rport_delete_work);
}
list_for_each_entry_safe(rport, next_rport,
&fc_host->rport_bindings, peers) {
- list_del(&rport->peers);
rport->port_state = FC_PORTSTATE_DELETED;
- fc_queue_work(shost, &rport->rport_delete_work);
}
-
spin_unlock_irqrestore(shost->host_lock, flags);
+ list_for_each_entry_safe(rport, next_rport,
+ &fc_host->rports, peers) {
+ /* Cancel ALL timers and work before removing from list */
+ cancel_delayed_work_sync(&rport->fail_io_work);
+ cancel_delayed_work_sync(&rport->dev_loss_work);
+ cancel_work_sync(&rport->scan_work);
+ cancel_work_sync(&rport->stgt_delete_work);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_del(&rport->peers);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /* Now safe to do final deletion synchronously */
+ fc_rport_final_delete(&rport->rport_delete_work);
+ }
+
+ list_for_each_entry_safe(rport, next_rport,
+ &fc_host->rport_bindings, peers) {
+ /* Cancel ALL timers and work before removing from list */
+ cancel_delayed_work_sync(&rport->fail_io_work);
+ cancel_delayed_work_sync(&rport->dev_loss_work);
+ cancel_work_sync(&rport->scan_work);
+ cancel_work_sync(&rport->stgt_delete_work);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_del(&rport->peers);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ /* Now safe to do final deletion synchronously */
+ fc_rport_final_delete(&rport->rport_delete_work);
+ }
+
/* flush all scan work items */
scsi_flush_work(shost);
@@ -2983,21 +3024,22 @@ fc_rport_final_delete(struct work_struct *work)
scsi_flush_work(shost);
/*
- * Cancel any outstanding timers. These should really exist
- * only when rmmod'ing the LLDD and we're asking for
- * immediate termination of the rports
+ * Cancel any outstanding delayed work synchronously.
+ * This must be done BEFORE taking the spinlock and BEFORE
+ * any state changes, as cancel_delayed_work_sync() can sleep.
+ *
+ * These timers should only exist when rmmod'ing the LLDD
+ * and we're asking for immediate termination of rports.
+ */
+ cancel_delayed_work_sync(&rport->fail_io_work);
+ cancel_delayed_work_sync(&rport->dev_loss_work);
+ cancel_work_sync(&rport->scan_work);
+ /*
+ * Now safe to clear the flag under spinlock since all
+ * async work has been cancelled.
*/
spin_lock_irqsave(shost->host_lock, flags);
- if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
- spin_unlock_irqrestore(shost->host_lock, flags);
- if (!cancel_delayed_work(&rport->fail_io_work))
- fc_flush_devloss(shost, rport);
- if (!cancel_delayed_work(&rport->dev_loss_work))
- fc_flush_devloss(shost, rport);
- cancel_work_sync(&rport->scan_work);
- spin_lock_irqsave(shost->host_lock, flags);
- rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
- }
+ rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
spin_unlock_irqrestore(shost->host_lock, flags);
/* Delete SCSI target and sdevs */
@@ -3027,6 +3069,7 @@ fc_rport_final_delete(struct work_struct *work)
if (rport->devloss_work_q) {
work_q = rport->devloss_work_q;
rport->devloss_work_q = NULL;
+ flush_workqueue(work_q);
destroy_workqueue(work_q);
}
--
2.52.0