From: Himanshu Madhani <himanshu.madhani@oracle.com>
To: Justin Tee <justintee8345@gmail.com>, linux-scsi@vger.kernel.org
Cc: jsmart2021@gmail.com, justin.tee@broadcom.com
Subject: Re: [PATCH 13/17] lpfc: Protect vport fc_nodes list with an explicit spin lock
Date: Tue, 30 Jan 2024 18:50:42 -0800
Message-ID: <0e5db4b1-7f8e-404d-8197-796f366e9e16@oracle.com>
In-Reply-To: <20240131003549.147784-14-justintee8345@gmail.com>
On 1/30/24 16:35, Justin Tee wrote:
> In an attempt to reduce the number of unnecessary shost_lock acquisitions
> in the lpfc driver, replace shost_lock with an explicit fc_nodes_list_lock
> spinlock when accessing vport->fc_nodes lists. Although the vport memory
> region is owned by shost->hostdata, it is driver private memory, and an
> explicit lock for fc_nodes list mutations is more appropriate than locking
> the entire shost.
>
> Signed-off-by: Justin Tee <justin.tee@broadcom.com>
> ---
> drivers/scsi/lpfc/lpfc.h | 1 +
> drivers/scsi/lpfc/lpfc_attr.c | 35 +++++++++++-----------
> drivers/scsi/lpfc/lpfc_ct.c | 7 ++---
> drivers/scsi/lpfc/lpfc_debugfs.c | 12 ++++----
> drivers/scsi/lpfc/lpfc_hbadisc.c | 50 ++++++++++++++++----------------
> drivers/scsi/lpfc/lpfc_init.c | 2 +-
> 6 files changed, 53 insertions(+), 54 deletions(-)
>
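
The conversion looks mechanical and consistent throughout. For anyone
following along, the before/after shape is roughly the following (a minimal
sketch with hypothetical locals, not code lifted verbatim from the driver):

	struct lpfc_nodelist *ndlp;
	unsigned long iflags;

	/* before: fc_nodes walks piggybacked on the Scsi_Host lock */
	spin_lock_irq(shost->host_lock);
	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		/* ... touch ndlp ... */
	}
	spin_unlock_irq(shost->host_lock);

	/* after: only fc_nodes walks/mutations take the new per-vport lock */
	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		/* ... touch ndlp ... */
	}
	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);

Note the switch from the _irq to the _irqsave/_irqrestore variants as well,
which keeps these helpers safe to call regardless of the caller's IRQ state.
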
> diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
> index 8f3cac09a381..da9f87f89941 100644
> --- a/drivers/scsi/lpfc/lpfc.h
> +++ b/drivers/scsi/lpfc/lpfc.h
> @@ -587,6 +587,7 @@ struct lpfc_vport {
> #define FC_CT_RPRT_DEFER 0x20 /* Defer issuing FDMI RPRT */
>
> struct list_head fc_nodes;
> + spinlock_t fc_nodes_list_lock; /* spinlock for fc_nodes list */
>
> /* Keep counters for the number of entries in each list. */
> atomic_t fc_plogi_cnt;
> diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
> index 142c90eb210f..023f4f2c62a6 100644
> --- a/drivers/scsi/lpfc/lpfc_attr.c
> +++ b/drivers/scsi/lpfc/lpfc_attr.c
> @@ -344,6 +344,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
> struct lpfc_fc4_ctrl_stat *cstat;
> uint64_t data1, data2, data3;
> uint64_t totin, totout, tot;
> + unsigned long iflags;
> char *statep;
> int i;
> int len = 0;
> @@ -543,7 +544,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
> if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
> goto buffer_done;
>
> - spin_lock_irq(shost->host_lock);
> + spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
>
> list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
> nrport = NULL;
> @@ -617,7 +618,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
> if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
> goto unlock_buf_done;
> }
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
>
> if (!lport)
> goto buffer_done;
> @@ -681,7 +682,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
> goto buffer_done;
>
> unlock_buf_done:
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
>
> buffer_done:
> len = strnlen(buf, PAGE_SIZE);
> @@ -3765,15 +3766,14 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
> static void
> lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
> {
> - struct Scsi_Host *shost;
> struct lpfc_nodelist *ndlp;
> + unsigned long iflags;
> #if (IS_ENABLED(CONFIG_NVME_FC))
> struct lpfc_nvme_rport *rport;
> struct nvme_fc_remote_port *remoteport = NULL;
> #endif
>
> - shost = lpfc_shost_from_vport(vport);
> - spin_lock_irq(shost->host_lock);
> + spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
> list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
> if (ndlp->rport)
> ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
> @@ -3788,7 +3788,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
> vport->cfg_devloss_tmo);
> #endif
> }
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
> }
>
> /**
> @@ -3974,8 +3974,8 @@ lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
> static int
> lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
> {
> - struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
> struct lpfc_nodelist *ndlp;
> + unsigned long iflags;
>
> if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
> return -EINVAL;
> @@ -3983,14 +3983,13 @@ lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
> if (val == vport->cfg_tgt_queue_depth)
> return 0;
>
> - spin_lock_irq(shost->host_lock);
> vport->cfg_tgt_queue_depth = val;
>
> /* Next loop thru nodelist and change cmd_qdepth */
> + spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
> list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
> ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
> -
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
> return 0;
> }
>
> @@ -5236,8 +5235,8 @@ lpfc_vport_param_show(max_scsicmpl_time);
> static int
> lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
> {
> - struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
> struct lpfc_nodelist *ndlp, *next_ndlp;
> + unsigned long iflags;
>
> if (val == vport->cfg_max_scsicmpl_time)
> return 0;
> @@ -5245,13 +5244,13 @@ lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
> return -EINVAL;
> vport->cfg_max_scsicmpl_time = val;
>
> - spin_lock_irq(shost->host_lock);
> + spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
> list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
> if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
> continue;
> ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
> }
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
> return 0;
> }
> lpfc_vport_param_store(max_scsicmpl_time);
> @@ -6853,17 +6852,19 @@ lpfc_get_node_by_target(struct scsi_target *starget)
> struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
> struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
> struct lpfc_nodelist *ndlp;
> + unsigned long iflags;
>
> - spin_lock_irq(shost->host_lock);
> + spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
> /* Search for this, mapped, target ID */
> list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
> if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
> starget->id == ndlp->nlp_sid) {
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vport->fc_nodes_list_lock,
> + iflags);
> return ndlp;
> }
> }
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
> return NULL;
> }
>
> diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
> index 315db836404a..633b8ba25bc3 100644
> --- a/drivers/scsi/lpfc/lpfc_ct.c
> +++ b/drivers/scsi/lpfc/lpfc_ct.c
> @@ -1853,11 +1853,10 @@ static uint32_t
> lpfc_find_map_node(struct lpfc_vport *vport)
> {
> struct lpfc_nodelist *ndlp, *next_ndlp;
> - struct Scsi_Host *shost;
> + unsigned long iflags;
> uint32_t cnt = 0;
>
> - shost = lpfc_shost_from_vport(vport);
> - spin_lock_irq(shost->host_lock);
> + spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
> list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
> if (ndlp->nlp_type & NLP_FABRIC)
> continue;
> @@ -1865,7 +1864,7 @@ lpfc_find_map_node(struct lpfc_vport *vport)
> (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE))
> cnt++;
> }
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
> return cnt;
> }
>
> diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
> index ea9b42225e62..03abc401c5f2 100644
> --- a/drivers/scsi/lpfc/lpfc_debugfs.c
> +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
> @@ -806,10 +806,10 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
> {
> int len = 0;
> int i, iocnt, outio, cnt;
> - struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
> struct lpfc_hba *phba = vport->phba;
> struct lpfc_nodelist *ndlp;
> unsigned char *statep;
> + unsigned long iflags;
> struct nvme_fc_local_port *localport;
> struct nvme_fc_remote_port *nrport = NULL;
> struct lpfc_nvme_rport *rport;
> @@ -818,7 +818,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
> outio = 0;
>
> len += scnprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
> - spin_lock_irq(shost->host_lock);
> + spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
> list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
> iocnt = 0;
> if (!cnt) {
> @@ -908,7 +908,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
> ndlp->nlp_defer_did);
> len += scnprintf(buf+len, size-len, "\n");
> }
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
>
> len += scnprintf(buf + len, size - len,
> "\nOutstanding IO x%x\n", outio);
> @@ -940,8 +940,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
> if (!localport)
> goto out_exit;
>
> - spin_lock_irq(shost->host_lock);
> -
> /* Port state is only one of two values for now. */
> if (localport->port_id)
> statep = "ONLINE";
> @@ -953,6 +951,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
> localport->port_id, statep);
>
> len += scnprintf(buf + len, size - len, "\tRport List:\n");
> + spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
> list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
> /* local short-hand pointer. */
> spin_lock(&ndlp->lock);
> @@ -1006,8 +1005,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
> /* Terminate the string. */
> len += scnprintf(buf + len, size - len, "\n");
> }
> -
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
> out_exit:
> return len;
> }
> diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
> index 7c4356d87730..08acd5d398aa 100644
> --- a/drivers/scsi/lpfc/lpfc_hbadisc.c
> +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
> @@ -4860,10 +4860,10 @@ void
> lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
> int state)
> {
> - struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
> int old_state = ndlp->nlp_state;
> int node_dropped = ndlp->nlp_flag & NLP_DROPPED;
> char name1[16], name2[16];
> + unsigned long iflags;
>
> lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
> "0904 NPort state transition x%06x, %s -> %s\n",
> @@ -4890,9 +4890,9 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
> }
>
> if (list_empty(&ndlp->nlp_listp)) {
> - spin_lock_irq(shost->host_lock);
> + spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
> list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
> } else if (old_state)
> lpfc_nlp_counters(vport, old_state, -1);
>
> @@ -4904,26 +4904,26 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
> void
> lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
> {
> - struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
> + unsigned long iflags;
>
> if (list_empty(&ndlp->nlp_listp)) {
> - spin_lock_irq(shost->host_lock);
> + spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
> list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
> }
> }
>
> void
> lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
> {
> - struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
> + unsigned long iflags;
>
> lpfc_cancel_retry_delay_tmo(vport, ndlp);
> if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
> lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
> - spin_lock_irq(shost->host_lock);
> + spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
> list_del_init(&ndlp->nlp_listp);
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
> lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
> NLP_STE_UNUSED_NODE);
> }
> @@ -5421,8 +5421,8 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
> {
> struct lpfc_vport **vports;
> struct lpfc_nodelist *ndlp;
> - struct Scsi_Host *shost;
> int i;
> + unsigned long iflags;
>
> vports = lpfc_create_vport_work_array(phba);
> if (!vports) {
> @@ -5431,17 +5431,18 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
> return;
> }
> for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
> - shost = lpfc_shost_from_vport(vports[i]);
> - spin_lock_irq(shost->host_lock);
> + spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags);
> list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
> if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
> /* The mempool_alloc might sleep */
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock,
> + iflags);
> lpfc_unreg_rpi(vports[i], ndlp);
> - spin_lock_irq(shost->host_lock);
> + spin_lock_irqsave(&vports[i]->fc_nodes_list_lock,
> + iflags);
> }
> }
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags);
> }
> lpfc_destroy_vport_work_array(phba, vports);
> }
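
The unlock/relock dance here preserves the behavior the old shost_lock code
had: the list lock is dropped across lpfc_unreg_rpi(), whose mempool_alloc
may sleep, and re-taken before the iteration continues. In outline (a sketch
of the general pattern, not driver-exact):

	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
			continue;
		/* drop the list lock around the potentially sleeping call */
		spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
		lpfc_unreg_rpi(vport, ndlp);
		spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
	}
	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);

As before the conversion, this relies on ndlp staying on the fc_nodes list
while the lock is dropped.
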
> @@ -5683,12 +5684,11 @@ lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
> struct lpfc_nodelist *
> lpfc_findnode_mapped(struct lpfc_vport *vport)
> {
> - struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
> struct lpfc_nodelist *ndlp;
> uint32_t data1;
> unsigned long iflags;
>
> - spin_lock_irqsave(shost->host_lock, iflags);
> + spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
>
> list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
> if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
> @@ -5697,7 +5697,8 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
> ((uint32_t)ndlp->nlp_xri << 16) |
> ((uint32_t)ndlp->nlp_type << 8) |
> ((uint32_t)ndlp->nlp_rpi & 0xff));
> - spin_unlock_irqrestore(shost->host_lock, iflags);
> + spin_unlock_irqrestore(&vport->fc_nodes_list_lock,
> + iflags);
> lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE,
> "2025 FIND node DID MAPPED "
> "Data: x%px x%x x%x x%x x%px\n",
> @@ -5707,7 +5708,7 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
> return ndlp;
> }
> }
> - spin_unlock_irqrestore(shost->host_lock, iflags);
> + spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
>
> /* FIND node did <did> NOT FOUND */
> lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
> @@ -6742,7 +6743,7 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
> struct lpfc_vport **vports;
> int i, ret = 0;
> struct lpfc_nodelist *ndlp;
> - struct Scsi_Host *shost;
> + unsigned long iflags;
>
> vports = lpfc_create_vport_work_array(phba);
>
> @@ -6751,8 +6752,6 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
> return 1;
>
> for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
> - shost = lpfc_shost_from_vport(vports[i]);
> - spin_lock_irq(shost->host_lock);
> /*
> * IF the CVL_RCVD bit is not set then we have sent the
> * flogi.
> @@ -6760,15 +6759,16 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
> * unreg the fcf.
> */
> if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
> - spin_unlock_irq(shost->host_lock);
> ret = 1;
> goto out;
> }
> + spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags);
> list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
> if (ndlp->rport &&
> (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
> ret = 1;
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock,
> + iflags);
> goto out;
> } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
> ret = 1;
> @@ -6780,7 +6780,7 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
> ndlp->nlp_flag);
> }
> }
> - spin_unlock_irq(shost->host_lock);
> + spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags);
> }
> out:
> lpfc_destroy_vport_work_array(phba, vports);
> diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
> index 1285a7bbdced..c43118fab4aa 100644
> --- a/drivers/scsi/lpfc/lpfc_init.c
> +++ b/drivers/scsi/lpfc/lpfc_init.c
> @@ -3829,7 +3829,6 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
> vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
> spin_unlock_irq(shost->host_lock);
>
> - shost = lpfc_shost_from_vport(vports[i]);
> list_for_each_entry_safe(ndlp, next_ndlp,
> &vports[i]->fc_nodes,
> nlp_listp) {
> @@ -4833,6 +4832,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
>
> /* Initialize all internally managed lists. */
> INIT_LIST_HEAD(&vport->fc_nodes);
> + spin_lock_init(&vport->fc_nodes_list_lock);
> INIT_LIST_HEAD(&vport->rcv_buffer_list);
> spin_lock_init(&vport->work_port_lock);
>
Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
--
Himanshu Madhani
Oracle Linux Engineering