The Linux Kernel Mailing List
 help / color / mirror / Atom feed
* Re: [PATCH 1/5] ibmvfc: add basic FPIN support
       [not found] ` <20260408-ibmvfc-fpin-support-v1-1-52b06c464e03@linux.ibm.com>
@ 2026-05-07  4:12   ` Tyrel Datwyler
  2026-05-07 22:22     ` Dave Marquardt
  0 siblings, 1 reply; 11+ messages in thread
From: Tyrel Datwyler @ 2026-05-07  4:12 UTC (permalink / raw)
  To: davemarq, James E.J. Bottomley, Martin K. Petersen,
	Madhavan Srinivasan, Michael Ellerman, Nicholas Piggin,
	Christophe Leroy (CS GROUP)
  Cc: linux-kernel, linux-scsi, linuxppc-dev, Brian King, Greg Joyce,
	Kyle Mahlkuch

On 4/8/26 10:07 AM, Dave Marquardt via B4 Relay wrote:
> From: Dave Marquardt <davemarq@linux.ibm.com>
> 
> - Add FPIN event descriptor
> - Add congestion cleared status
> - Add code to handle basic FPIN async event
> - Add KUnit tests

You need a more detailed description of your changes here for the commit log body.

You will also need a Signed-off-by tag from yourself for this to even be merged.

https://www.kernel.org/doc/html/latest/process/submitting-patches.html

> ---
>  drivers/scsi/Kconfig                 |  10 ++
>  drivers/scsi/ibmvscsi/Makefile       |   1 +
>  drivers/scsi/ibmvscsi/ibmvfc.c       | 189 ++++++++++++++++++++++++++++++++++-
>  drivers/scsi/ibmvscsi/ibmvfc.h       |   9 ++
>  drivers/scsi/ibmvscsi/ibmvfc_kunit.c |  95 ++++++++++++++++++
>  5 files changed, 302 insertions(+), 2 deletions(-)

<snip>

> +static struct fc_els_fpin *
> +ibmvfc_common_fpin_to_desc(u8 fpin_status, __be64 wwpn, __be16 modifier,
> +			   __be32 period, __be32 threshold, __be32 event_count)
> +{
> +	struct fc_fn_peer_congn_desc *pdesc;
> +	struct fc_fn_congn_desc *cdesc;
> +	struct fc_fn_li_desc *ldesc;
> +	struct fc_els_fpin *fpin;
> +	size_t size;
> +
> +	size = ibmvfc_fpin_size_helper(fpin_status);
> +	if (size == 0)
> +		return NULL;
> +
> +	fpin = kzalloc(size, GFP_KERNEL);

This appears to be called by ibmvfc_handle_async(), which runs in atomic context
and therefore cannot sleep. This allocation needs to be GFP_ATOMIC. Although
there is another issue below that might make this moot.

> +	if (fpin == NULL)
> +		return NULL;
> +
> +	fpin->fpin_cmd = ELS_FPIN;
> +
> +	switch (fpin_status) {
> +	case IBMVFC_AE_FPIN_CONGESTION_CLEARED:
> +	case IBMVFC_AE_FPIN_LINK_CONGESTED:
> +		fpin->desc_len = cpu_to_be32(sizeof(struct fc_fn_congn_desc));
> +		cdesc = (struct fc_fn_congn_desc *)fpin->fpin_desc;
> +		cdesc->desc_tag = cpu_to_be32(ELS_DTAG_CONGESTION);
> +		cdesc->desc_len = cpu_to_be32(FC_TLV_DESC_LENGTH_FROM_SZ(*cdesc));
> +		if (fpin_status == IBMVFC_AE_FPIN_CONGESTION_CLEARED)
> +			cdesc->event_type = cpu_to_be16(FPIN_CONGN_CLEAR);
> +		else
> +			cdesc->event_type = cpu_to_be16(FPIN_CONGN_DEVICE_SPEC);
> +		cdesc->event_modifier = modifier;
> +		cdesc->event_period = period;
> +		cdesc->severity = FPIN_CONGN_SEVERITY_WARNING;
> +		break;
> +	case IBMVFC_AE_FPIN_PORT_CONGESTED:
> +	case IBMVFC_AE_FPIN_PORT_CLEARED:
> +		fpin->desc_len = cpu_to_be32(sizeof(struct fc_fn_peer_congn_desc));
> +		pdesc = (struct fc_fn_peer_congn_desc *)fpin->fpin_desc;
> +		pdesc->desc_tag = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
> +		pdesc->desc_len = cpu_to_be32(FC_TLV_DESC_LENGTH_FROM_SZ(*pdesc));
> +		if (fpin_status == IBMVFC_AE_FPIN_PORT_CLEARED)
> +			pdesc->event_type = cpu_to_be16(FPIN_CONGN_CLEAR);
> +		else
> +			pdesc->event_type = cpu_to_be16(FPIN_CONGN_DEVICE_SPEC);
> +		pdesc->event_modifier = modifier;
> +		pdesc->event_period = period;
> +		pdesc->detecting_wwpn = cpu_to_be64(0);
> +		pdesc->attached_wwpn = wwpn;
> +		pdesc->pname_count = cpu_to_be32(1);
> +		pdesc->pname_list[0] = wwpn;
> +		break;
> +	case IBMVFC_AE_FPIN_PORT_DEGRADED:
> +		fpin->desc_len = cpu_to_be32(sizeof(struct fc_fn_li_desc));
> +		ldesc = (struct fc_fn_li_desc *)fpin->fpin_desc;
> +		ldesc->desc_tag = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
> +		ldesc->desc_len = cpu_to_be32(FC_TLV_DESC_LENGTH_FROM_SZ(*ldesc));
> +		ldesc->event_type = cpu_to_be16(FPIN_LI_UNKNOWN);
> +		ldesc->event_modifier = modifier;
> +		ldesc->event_threshold = threshold;
> +		ldesc->event_count = event_count;
> +		ldesc->detecting_wwpn = cpu_to_be64(0);
> +		ldesc->attached_wwpn = wwpn;
> +		ldesc->pname_count = cpu_to_be32(1);
> +		ldesc->pname_list[0] = wwpn;
> +		break;
> +	default:
> +		/* This should be caught above. */
> +		kfree(fpin);
> +		fpin = NULL;
> +		break;
> +	}
> +
> +	return fpin;
> +}
> +
> +/**
> + * ibmvfc_basic_fpin_to_desc(): allocate and populate a struct fc_els_fpin struct
> + * containing a descriptor.
> + * @ibmvfc_fpin: Pointer to async crq
> + *
> + * Allocate a struct fc_els_fpin containing a descriptor and populate
> + * based on data from *ibmvfc_fpin.
> + *
> + * Return:
> + * NULL     - unable to allocate structure
> + * non-NULL - pointer to populated struct fc_els_fpin
> + */
> +static struct fc_els_fpin *
> +/*XXX*/ibmvfc_basic_fpin_to_desc(struct ibmvfc_async_crq *crq)

What is with this /*XXX*/? I can't find it once I apply the patchset, so I assume
it's removed in a later patch, but it should be removed here.

> +{
> +	return ibmvfc_common_fpin_to_desc(crq->fpin_status, crq->wwpn,
> +					  cpu_to_be16(0),
> +					  cpu_to_be32(IBMVFC_FPIN_DEFAULT_EVENT_PERIOD),
> +					  cpu_to_be32(IBMVFC_FPIN_DEFAULT_EVENT_THRESHOLD),
> +					  cpu_to_be32(1));
> +}
> +
>  /**
>   * ibmvfc_handle_async - Handle an async event from the adapter
>   * @crq:	crq to process
>   * @vhost:	ibmvfc host struct
>   *
>   **/
> -static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
> -				struct ibmvfc_host *vhost)
> +VISIBLE_IF_KUNIT void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
> +					  struct ibmvfc_host *vhost)
>  {
>  	const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
>  	struct ibmvfc_target *tgt;
> +	struct fc_els_fpin *fpin;
>  
>  	ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
>  		   " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
> @@ -3269,11 +3422,37 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
>  	case IBMVFC_AE_HALT:
>  		ibmvfc_link_down(vhost, IBMVFC_HALTED);
>  		break;
> +	case IBMVFC_AE_FPIN:
> +		if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
> +			break;
> +		list_for_each_entry(tgt, &vhost->targets, queue) {
> +			if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
> +				continue;
> +			if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
> +				continue;
> +			if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
> +				continue;
> +			if (!tgt->rport)
> +				continue;
> +			fpin = ibmvfc_basic_fpin_to_desc(crq);
> +			if (fpin) {
> +				fc_host_fpin_rcv(tgt->vhost->host,
> +						 sizeof(*fpin) +
> +						       be32_to_cpu(fpin->desc_len),
> +						 (char *)fpin, 0);

This call to fc_host_fpin_rcv() appears to be problematic as it assumes no locks
are held, but ibmvfc_handle_async() is called with the scsi host lock held. We
already do a lot more work than we probably should in our interrupt handler. I
think we may need to pass the FPIN work off to a workqueue instead, to be
handled in process context.

-Tyrel



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 2/5] ibmvfc: Add NOOP command support
       [not found] ` <20260408-ibmvfc-fpin-support-v1-2-52b06c464e03@linux.ibm.com>
@ 2026-05-07  4:17   ` Tyrel Datwyler
  2026-05-07 22:25     ` Dave Marquardt
  0 siblings, 1 reply; 11+ messages in thread
From: Tyrel Datwyler @ 2026-05-07  4:17 UTC (permalink / raw)
  To: davemarq, James E.J. Bottomley, Martin K. Petersen,
	Madhavan Srinivasan, Michael Ellerman, Nicholas Piggin,
	Christophe Leroy (CS GROUP)
  Cc: linux-kernel, linux-scsi, linuxppc-dev, Brian King, Greg Joyce,
	Kyle Mahlkuch

On 4/8/26 10:07 AM, Dave Marquardt via B4 Relay wrote:
> From: Dave Marquardt <davemarq@linux.ibm.com>

As with patch 1 this requires a slightly more detailed commit log message and
developer sign off tag.

> 
> - Add VFC_NOOP command support
> - Add KUnit tests for VFC_NOOP command
> ---
>  drivers/scsi/ibmvscsi/ibmvfc.c       | 23 +++++++++++++----------
>  drivers/scsi/ibmvscsi/ibmvfc.h       | 13 +++++++++++++
>  drivers/scsi/ibmvscsi/ibmvfc_kunit.c | 27 +++++++++++++++++++++++++++
>  3 files changed, 53 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
> index 3ac376ba2c62..808301fa452d 100644
> --- a/drivers/scsi/ibmvscsi/ibmvfc.c
> +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
> @@ -189,13 +189,6 @@ static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
>  	return rc;
>  }
>  
> -static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
> -{
> -	u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);
> -
> -	return (host_caps & cap_flags) ? 1 : 0;
> -}
> -

It appears you are moving this to ibmvfc.h? Is there a reason for this beyond
making it visible to KUnit?

>  static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
>  						   struct ibmvfc_cmd *vfc_cmd)
>  {
> @@ -1512,7 +1505,9 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
>  		login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
>  
>  	login_info->max_cmds = cpu_to_be32(max_cmds);
> -	login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
> +	login_info->capabilities =
> +		cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN |
> +			    IBMVFC_CAN_USE_NOOP_CMD);
>  
>  	if (vhost->mq_enabled || vhost->using_channels)
>  		login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);
> @@ -3461,8 +3456,8 @@ EXPORT_SYMBOL_IF_KUNIT(ibmvfc_handle_async);
>   * @evt_doneq:	Event done queue
>   *
>  **/
> -static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
> -			      struct list_head *evt_doneq)
> +VISIBLE_IF_KUNIT void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
> +					struct list_head *evt_doneq)
>  {
>  	long rc;
>  	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
> @@ -3520,6 +3515,13 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
>  	if (crq->format == IBMVFC_ASYNC_EVENT)
>  		return;
>  
> +	if (crq->format == IBMVFC_VFC_NOOP) {
> +		if (!ibmvfc_check_caps(vhost, IBMVFC_SUPPORT_NOOP_CMD))
> +			dev_err(vhost->dev,
> +				"Received unexpected NOOP command from partner\n");

If we have a misbehaved VIOS partner we may want to ratelimit this dev_err so
that we don't flood the log. Probably a corner case, but I don't think it hurts.

-Tyrel

> +		return;
> +	}
> +
>  	/* The only kind of payload CRQs we should get are responses to
>  	 * things we send. Make sure this response is to something we
>  	 * actually sent



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 3/5] ibmvfc: make ibmvfc login to fabric
       [not found] ` <20260408-ibmvfc-fpin-support-v1-3-52b06c464e03@linux.ibm.com>
@ 2026-05-07  5:03   ` Tyrel Datwyler
  2026-05-07 22:34     ` Dave Marquardt
  0 siblings, 1 reply; 11+ messages in thread
From: Tyrel Datwyler @ 2026-05-07  5:03 UTC (permalink / raw)
  To: davemarq, James E.J. Bottomley, Martin K. Petersen,
	Madhavan Srinivasan, Michael Ellerman, Nicholas Piggin,
	Christophe Leroy (CS GROUP)
  Cc: linux-kernel, linux-scsi, linuxppc-dev, Brian King, Greg Joyce,
	Kyle Mahlkuch

On 4/8/26 10:07 AM, Dave Marquardt via B4 Relay wrote:
> From: Dave Marquardt <davemarq@linux.ibm.com>
> 
> Make ibmvfc login to fabric when NPIV login returns SUPPORT_SCSI or
> SUPPORT_NVMEOF capabilities.

Again better commit log message here and developer sign off tag.

> ---
>  drivers/scsi/ibmvscsi/ibmvfc.c | 100 ++++++++++++++++++++++++++++++++++++++---
>  drivers/scsi/ibmvscsi/ibmvfc.h |  20 +++++++++
>  2 files changed, 115 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
> index 808301fa452d..803fc3caa14d 100644
> --- a/drivers/scsi/ibmvscsi/ibmvfc.c
> +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
> @@ -5205,6 +5205,89 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
>  		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
>  }
>  
> +static void ibmvfc_fabric_login_done(struct ibmvfc_event *evt)
> +{
> +	struct ibmvfc_fabric_login *rsp = &evt->xfer_iu->fabric_login;
> +	u32 mad_status = be16_to_cpu(rsp->common.status);
> +	struct ibmvfc_host *vhost = evt->vhost;
> +	int level = IBMVFC_DEFAULT_LOG_LEVEL;
> +
> +	ENTER;
> +
> +	switch (mad_status) {
> +	case IBMVFC_MAD_SUCCESS:
> +		vhost->logged_in = 1;

I'm not sure I see the point of setting logged_in here since we already set it
in npiv_login_done.

> +		vhost->fabric_capabilities = rsp->capabilities;

The way this is currently spec'd out there are no linux relevant capabilities
coming from fabric login. So, I'm not sure there is a reason to save them at
this point.

> +		fc_host_port_id(vhost->host) = be64_to_cpu(rsp->nport_id);
> +		ibmvfc_free_event(evt);
> +		break;
> +
> +	case IBMVFC_MAD_FAILED:
> +		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
> +			level += ibmvfc_retry_host_init(vhost);
> +		else
> +			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
> +		ibmvfc_log(vhost, level, "Fabric Login failed: %s (%x:%x)\n",
> +			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
> +						be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
> +		ibmvfc_free_event(evt);
> +		LEAVE;
> +		return;
> +
> +	case IBMVFC_MAD_CRQ_ERROR:
> +		ibmvfc_retry_host_init(vhost);
> +		fallthrough;
> +
> +	case IBMVFC_MAD_DRIVER_FAILED:
> +		ibmvfc_free_event(evt);
> +		LEAVE;
> +		return;
> +
> +	default:
> +		dev_err(vhost->dev, "Invalid fabric Login response: 0x%x\n", mad_status);
> +		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
> +		ibmvfc_free_event(evt);
> +		LEAVE;
> +		return;
> +	}
> +
> +	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
> +	wake_up(&vhost->work_wait_q);
> +
> +	LEAVE;
> +}
> +
> +static void ibmvfc_fabric_login(struct ibmvfc_host *vhost)
> +{
> +	struct ibmvfc_fabric_login *mad;
> +	struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
> +	int level = IBMVFC_DEFAULT_LOG_LEVEL;
> +
> +	if (!evt) {

I think we need to hard reset here or we are dead in the water if there are no
events.

> +		ibmvfc_log(vhost, level, "Fabric Login failed: no available events\n");
> +		return;
> +	}
> +
> +	ibmvfc_init_event(evt, ibmvfc_fabric_login_done, IBMVFC_MAD_FORMAT);
> +	mad = &evt->iu.fabric_login;
> +	memset(mad, 0, sizeof(*mad));
> +	if (vhost->scsi_scrqs.protocol == IBMVFC_PROTO_SCSI)
> +		mad->common.opcode = cpu_to_be32(IBMVFC_FABRIC_LOGIN);
> +	else if (vhost->scsi_scrqs.protocol == IBMVFC_PROTO_NVME)
> +		mad->common.opcode = cpu_to_be32(IBMVFC_NVMF_FABRIC_LOGIN);

The VIOS won't return NVMF support unless we advertise it. So, I think it's best
to omit any NVMF-relevant changes that are spec'd, as they aren't being applied
in a proper workflow here anyway. If the driver advertised both SCSI and NVMF
support, the current code would never do an NVMF fabric login as it would never
fall through here.

> +	else {
> +		ibmvfc_log(vhost, level, "Fabric Login failed: unknown protocol\n");
> +		return;
> +	}
> +	mad->common.version = cpu_to_be32(1);
> +	mad->common.length = cpu_to_be16(sizeof(*mad));
> +
> +	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
> +
> +	if (ibmvfc_send_event(evt, vhost, default_timeout))
> +		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
> +}
> +
>  static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
>  {
>  	struct ibmvfc_host *vhost = evt->vhost;
> @@ -5251,8 +5334,12 @@ static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
>  		return;
>  	}
>  
> -	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
> -	wake_up(&vhost->work_wait_q);
> +	if (ibmvfc_check_caps(vhost, (IBMVFC_SUPPORT_SCSI | IBMVFC_SUPPORT_NVMEOF))) {
> +		ibmvfc_fabric_login(vhost);

Again drop the NVMEOF code.

> +	} else {
> +		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
> +		wake_up(&vhost->work_wait_q);
> +	}
>  }
>  
>  static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
> @@ -5443,9 +5530,12 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
>  	vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
>  	vhost->host->max_sectors = npiv_max_sectors;
>  
> -	if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) {
> -		ibmvfc_channel_enquiry(vhost);
> -	} else {
> +	if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS)) {
> +		if (vhost->do_enquiry)
> +			ibmvfc_channel_enquiry(vhost);

I'm not sure I understand expanding this code to a second if block as there is
no functional change.

> +	} else if (ibmvfc_check_caps(vhost, (IBMVFC_SUPPORT_SCSI | IBMVFC_SUPPORT_NVMEOF)))

Again drop NVMEOF and NVMF related changes.

> +		ibmvfc_fabric_login(vhost);
> +	else {
>  		vhost->do_enquiry = 0;
>  		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
>  		wake_up(&vhost->work_wait_q);
> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
> index cd0917f70c6d..4f680c5d9558 100644
> --- a/drivers/scsi/ibmvscsi/ibmvfc.h
> +++ b/drivers/scsi/ibmvscsi/ibmvfc.h
> @@ -138,6 +138,8 @@ enum ibmvfc_mad_types {
>  	IBMVFC_CHANNEL_ENQUIRY	= 0x1000,
>  	IBMVFC_CHANNEL_SETUP	= 0x2000,
>  	IBMVFC_CONNECTION_INFO	= 0x4000,
> +	IBMVFC_FABRIC_LOGIN	= 0x8000,
> +	IBMVFC_NVMF_FABRIC_LOGIN	= 0x8001,
>  };
>  
>  struct ibmvfc_mad_common {
> @@ -227,6 +229,8 @@ struct ibmvfc_npiv_login_resp {
>  #define IBMVFC_MAD_VERSION_CAP		0x20
>  #define IBMVFC_HANDLE_VF_WWPN		0x40
>  #define IBMVFC_CAN_SUPPORT_CHANNELS	0x80
> +#define IBMVFC_SUPPORT_NVMEOF		0x100
> +#define IBMVFC_SUPPORT_SCSI		0x200
>  #define IBMVFC_SUPPORT_NOOP_CMD		0x1000
>  	__be32 max_cmds;
>  	__be32 scsi_id_sz;
> @@ -590,6 +594,19 @@ struct ibmvfc_connection_info {
>  	__be64 reserved[16];
>  } __packed __aligned(8);
>  
> +struct ibmvfc_fabric_login {
> +	struct ibmvfc_mad_common common;
> +	__be64 flags;
> +#define IBMVFC_STRIP_MERGE	0x1
> +#define IBMVFC_LINK_COMMANDS	0x2
> +	__be64 capabilities;
> +	__be64 nport_id;
> +	__be16 status;
> +	__be16 error;
> +	__be32 pad;
> +	__be64 reserved[16];
> +} __packed __aligned(8);
> +
>  struct ibmvfc_trace_start_entry {
>  	u32 xfer_len;
>  } __packed;
> @@ -709,6 +726,7 @@ union ibmvfc_iu {
>  	struct ibmvfc_channel_enquiry channel_enquiry;
>  	struct ibmvfc_channel_setup_mad channel_setup;
>  	struct ibmvfc_connection_info connection_info;
> +	struct ibmvfc_fabric_login fabric_login;
>  } __packed __aligned(8);
>  
>  enum ibmvfc_target_action {
> @@ -921,6 +939,8 @@ struct ibmvfc_host {
>  	struct work_struct rport_add_work_q;
>  	wait_queue_head_t init_wait_q;
>  	wait_queue_head_t work_wait_q;
> +	__be64 fabric_capabilities;
> +	unsigned int login_cap_index;

Let's drop these as they serve no purpose for Linux. If the spec changes to
introduce capabilities relevant to Linux, we can add them then.

-Tyrel

>  };
>  
>  #define DBG_CMD(CMD) do { if (ibmvfc_debug) CMD; } while (0)
> 


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 4/5] ibmvfc: use async sub-queue for FPIN messages
       [not found] ` <20260408-ibmvfc-fpin-support-v1-4-52b06c464e03@linux.ibm.com>
@ 2026-05-07  5:41   ` Tyrel Datwyler
  2026-05-07 22:40     ` Dave Marquardt
  0 siblings, 1 reply; 11+ messages in thread
From: Tyrel Datwyler @ 2026-05-07  5:41 UTC (permalink / raw)
  To: davemarq, James E.J. Bottomley, Martin K. Petersen,
	Madhavan Srinivasan, Michael Ellerman, Nicholas Piggin,
	Christophe Leroy (CS GROUP)
  Cc: linux-kernel, linux-scsi, linuxppc-dev, Brian King, Greg Joyce,
	Kyle Mahlkuch

On 4/8/26 10:07 AM, Dave Marquardt via B4 Relay wrote:
> From: Dave Marquardt <davemarq@linux.ibm.com>
> 
> - allocate async sub-queue
> - allocate interrupt and set up handler
> - negotiate use of async sub-queue with NPIV (VIOS)
> - refactor ibmvfc_basic_fpin_to_desc() and ibmvfc_full_fpin_to_desc()
>   into common routine
> - add KUnit test to verify async sub-queue is allocated

Again, a more descriptive commit log message is required here. Also, this looks like a
lot of things being implemented. Can this be broken into multiple patches? It
sure looks like a bunch of functional changes that build on each other.

> ---
>  drivers/scsi/ibmvscsi/ibmvfc.c       | 325 ++++++++++++++++++++++++++++++++---
>  drivers/scsi/ibmvscsi/ibmvfc.h       |  29 +++-
>  drivers/scsi/ibmvscsi/ibmvfc_kunit.c |  52 +++---
>  3 files changed, 363 insertions(+), 43 deletions(-)
> 
> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
> index 803fc3caa14d..26e39b367022 100644
> --- a/drivers/scsi/ibmvscsi/ibmvfc.c
> +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
> @@ -1471,6 +1471,13 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
>  	of_node_put(rootdn);
>  }
>  
> +static __be64 ibmvfc_npiv_chan_caps[] = {
> +	cpu_to_be64(IBMVFC_CAN_USE_CHANNELS | IBMVFC_USE_ASYNC_SUBQ |
> +		    IBMVFC_YES_SCSI | IBMVFC_CAN_HANDLE_FPIN),
> +	cpu_to_be64(IBMVFC_CAN_USE_CHANNELS),
> +};
> +#define IBMVFC_NPIV_CHAN_CAPS_SIZE (sizeof(ibmvfc_npiv_chan_caps)/sizeof(__be64))
> +

I really don't understand what you are doing here. You seem to be defining
various sets of capabilities, but how does the driver decide which set to use?
As far as I can tell, the index is increased and the capabilities decrease each
time a transport event is received. This looks like maybe it's just a testing hack.

>  /**
>   * ibmvfc_set_login_info - Setup info for NPIV login
>   * @vhost:	ibmvfc host struct
> @@ -1486,6 +1493,8 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
>  	const char *location;
>  	u16 max_cmds;
>  
> +	ENTER;
> +
>  	max_cmds = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
>  	if (mq_enabled)
>  		max_cmds += (scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ) *
> @@ -1509,8 +1518,12 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
>  		cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN |
>  			    IBMVFC_CAN_USE_NOOP_CMD);
>  
> -	if (vhost->mq_enabled || vhost->using_channels)
> -		login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);
> +	if (vhost->mq_enabled || vhost->using_channels) {
> +		if (vhost->login_cap_index >= IBMVFC_NPIV_CHAN_CAPS_SIZE)
> +			login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);
> +		else
> +			login_info->capabilities |= ibmvfc_npiv_chan_caps[vhost->login_cap_index];
> +	}
>  
>  	login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
>  	login_info->async.len = cpu_to_be32(async_crq->size *
> @@ -1524,6 +1537,8 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
>  	location = of_get_property(of_node, "ibm,loc-code", NULL);
>  	location = location ? location : dev_name(vhost->dev);
>  	strscpy(login_info->drc_name, location, sizeof(login_info->drc_name));
> +
> +	LEAVE;
>  }
>  
>  /**
> @@ -3323,7 +3338,7 @@ ibmvfc_common_fpin_to_desc(u8 fpin_status, __be64 wwpn, __be16 modifier,
>   * non-NULL - pointer to populated struct fc_els_fpin
>   */
>  static struct fc_els_fpin *
> -/*XXX*/ibmvfc_basic_fpin_to_desc(struct ibmvfc_async_crq *crq)

I mentioned this /*XXX*/ in an earlier patch. This needs to be fixed in that patch.

> +ibmvfc_basic_fpin_to_desc(struct ibmvfc_async_crq *crq)
>  {
>  	return ibmvfc_common_fpin_to_desc(crq->fpin_status, crq->wwpn,
>  					  cpu_to_be16(0),
> @@ -3332,6 +3347,29 @@ static struct fc_els_fpin *
>  					  cpu_to_be32(1));
>  }
>  
> +/**
> + * ibmvfc_full_fpin_to_desc(): allocate and populate a struct fc_els_fpin struct
> + * containing a descriptor.
> + * @ibmvfc_fpin: Pointer to async subq FPIN data
> + *
> + * Allocate a struct fc_els_fpin containing a descriptor and populate
> + * based on data from *ibmvfc_fpin.
> + *
> + * Return:
> + * NULL     - unable to allocate structure
> + * non-NULL - pointer to populated struct fc_els_fpin
> + */
> +static struct fc_els_fpin *
> +ibmvfc_full_fpin_to_desc(struct ibmvfc_async_subq *ibmvfc_fpin)
> +{
> +	return ibmvfc_common_fpin_to_desc(ibmvfc_fpin->fpin_status,
> +					  ibmvfc_fpin->wwpn,
> +					  cpu_to_be16(0),
> +					  cpu_to_be32(IBMVFC_FPIN_DEFAULT_EVENT_PERIOD),
> +					  cpu_to_be32(IBMVFC_FPIN_DEFAULT_EVENT_THRESHOLD),
> +					  cpu_to_be32(1));
> +}
> +
>  /**
>   * ibmvfc_handle_async - Handle an async event from the adapter
>   * @crq:	crq to process
> @@ -3449,6 +3487,120 @@ VISIBLE_IF_KUNIT void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
>  }
>  EXPORT_SYMBOL_IF_KUNIT(ibmvfc_handle_async);
>  
> +VISIBLE_IF_KUNIT void ibmvfc_handle_asyncq(struct ibmvfc_crq *crq_instance,
> +					   struct ibmvfc_host *vhost,
> +					   struct list_head *evt_doneq)
> +{
> +	struct ibmvfc_async_subq *crq = (struct ibmvfc_async_subq *)crq_instance;
> +	const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be16_to_cpu(crq->event));
> +	struct ibmvfc_target *tgt;
> +	struct fc_els_fpin *fpin;
> +
> +	ibmvfc_log(vhost, desc->log_level,
> +		   "%s event received. wwpn: %llx, node_name: %llx%s event 0x%x\n",
> +		   desc->desc, be64_to_cpu(crq->wwpn), be64_to_cpu(crq->id.node_name),
> +		   ibmvfc_get_link_state(crq->link_state), be16_to_cpu(crq->event));

Was there no way to avoid copy/pasting what looks like basically ibmvfc_handle_async
into ibmvfc_handle_asyncq? This is a bunch of unnecessary code bloat. The major
difference seems to be that crq->event is be64 on the standard CRQ and be16 on a
sub-crq, and that certain fields are accessed differently.

Again, I think maybe we need to consider moving all the async work into a workqueue.

> +
> +	switch (be16_to_cpu(crq->event)) {
> +	case IBMVFC_AE_RESUME:
> +		switch (crq->link_state) {
> +		case IBMVFC_AE_LS_LINK_DOWN:
> +			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
> +			break;
> +		case IBMVFC_AE_LS_LINK_DEAD:
> +			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
> +			break;
> +		case IBMVFC_AE_LS_LINK_UP:
> +		case IBMVFC_AE_LS_LINK_BOUNCED:
> +		default:
> +			vhost->events_to_log |= IBMVFC_AE_LINKUP;
> +			vhost->delay_init = 1;
> +			__ibmvfc_reset_host(vhost);
> +			break;
> +		}
> +
> +		break;
> +	case IBMVFC_AE_LINK_UP:
> +		vhost->events_to_log |= IBMVFC_AE_LINKUP;
> +		vhost->delay_init = 1;
> +		__ibmvfc_reset_host(vhost);
> +		break;
> +	case IBMVFC_AE_SCN_FABRIC:
> +	case IBMVFC_AE_SCN_DOMAIN:
> +		vhost->events_to_log |= IBMVFC_AE_RSCN;
> +		if (vhost->state < IBMVFC_HALTED) {
> +			vhost->delay_init = 1;
> +			__ibmvfc_reset_host(vhost);
> +		}
> +		break;
> +	case IBMVFC_AE_SCN_NPORT:
> +	case IBMVFC_AE_SCN_GROUP:
> +		vhost->events_to_log |= IBMVFC_AE_RSCN;
> +		ibmvfc_reinit_host(vhost);
> +		break;
> +	case IBMVFC_AE_ELS_LOGO:
> +	case IBMVFC_AE_ELS_PRLO:
> +	case IBMVFC_AE_ELS_PLOGI:
> +		list_for_each_entry(tgt, &vhost->targets, queue) {
> +			if (!crq->wwpn && !crq->id.node_name)
> +				break;
> +	#ifdef notyet
> +			if (cpu_to_be64(tgt->scsi_id) != acrq->scsi_id)
> +				continue;
> +	#endif
> +			if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
> +				continue;
> +			if (crq->id.node_name &&
> +			    cpu_to_be64(tgt->ids.node_name) != crq->id.node_name)
> +				continue;
> +			if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
> +				tgt->logo_rcvd = 1;
> +			if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
> +				ibmvfc_del_tgt(tgt);
> +				ibmvfc_reinit_host(vhost);
> +			}
> +		}
> +		break;
> +	case IBMVFC_AE_LINK_DOWN:
> +	case IBMVFC_AE_ADAPTER_FAILED:
> +		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
> +		break;
> +	case IBMVFC_AE_LINK_DEAD:
> +		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
> +		break;
> +	case IBMVFC_AE_HALT:
> +		ibmvfc_link_down(vhost, IBMVFC_HALTED);
> +		break;
> +	case IBMVFC_AE_FPIN:
> +		if (!crq->wwpn && !crq->id.node_name)
> +			break;
> +		list_for_each_entry(tgt, &vhost->targets, queue) {
> +			if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
> +				continue;
> +			if (crq->id.node_name &&
> +			    cpu_to_be64(tgt->ids.node_name) != crq->id.node_name)
> +				continue;
> +			if (!tgt->rport)
> +				continue;
> +			fpin = ibmvfc_full_fpin_to_desc(crq);
> +			if (fpin) {
> +				fc_host_fpin_rcv(tgt->vhost->host,
> +						 sizeof(*fpin) + be32_to_cpu(fpin->desc_len),
> +						 (char *)fpin, 0);
> +				kfree(fpin);
> +			} else
> +				dev_err(vhost->dev,
> +					"FPIN event %u received, unable to process\n",
> +					crq->fpin_status);
> +		}
> +		break;
> +	default:
> +		dev_err(vhost->dev, "Unknown async event received: %d\n", crq->event);
> +		break;
> +	}
> +}
> +EXPORT_SYMBOL_IF_KUNIT(ibmvfc_handle_asyncq);
> +
>  /**
>   * ibmvfc_handle_crq - Handles and frees received events in the CRQ
>   * @crq:	Command/Response queue
> @@ -3500,6 +3652,7 @@ VISIBLE_IF_KUNIT void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_ho
>  			dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
>  			ibmvfc_purge_requests(vhost, DID_ERROR);
>  			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
> +			vhost->login_cap_index++;
>  			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
>  		} else {
>  			dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
> @@ -4078,6 +4231,13 @@ static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost
>  	spin_unlock(&evt->queue->l_lock);
>  }
>  
> +/**
> + * ibmvfc_next_scrq - Returns the next entry in message subqueue
> + * @scrq:	Pointer to message subqueue
> + *
> + * Returns:
> + *	Pointer to next entry in queue / NULL if empty
> + **/
>  static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
>  {
>  	struct ibmvfc_crq *crq;
> @@ -4093,6 +4253,65 @@ static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
>  	return crq;
>  }
>  
> +static void ibmvfc_drain_async_subq(struct ibmvfc_queue *scrq)
> +{
> +	struct ibmvfc_crq *crq;
> +	struct ibmvfc_event *evt, *temp;
> +	unsigned long flags;
> +	int done = 0;
> +	LIST_HEAD(evt_doneq);
> +
> +	ENTER;
> +
> +	spin_lock_irqsave(scrq->q_lock, flags);
> +	while (!done) {
> +		while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
> +			ibmvfc_handle_asyncq(crq, scrq->vhost, &evt_doneq);
> +			crq->valid = 0;
> +			wmb();
> +		}
> +
> +		ibmvfc_toggle_scrq_irq(scrq, 1);
> +		crq = ibmvfc_next_scrq(scrq);
> +		if (crq != NULL) {
> +			ibmvfc_toggle_scrq_irq(scrq, 0);
> +			ibmvfc_handle_asyncq(crq, scrq->vhost, &evt_doneq);
> +			crq->valid = 0;
> +			wmb();
> +		} else
> +			done = 1;
> +	}
> +	spin_unlock_irqrestore(scrq->q_lock, flags);
> +
> +	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
> +		timer_delete(&evt->timer);
> +		list_del(&evt->queue_list);
> +		ibmvfc_trc_end(evt);
> +		evt->done(evt);
> +	}
> +	LEAVE;
> +}
> +
> +/**
> + * ibmvfc_interrupt_asyncq - Handle an async event from the adapter
> + * @irq:           interrupt request
> + * @scrq_instance: async subq
> + *
> + **/
> +static irqreturn_t ibmvfc_interrupt_asyncq(int irq, void *scrq_instance)
> +{
> +	struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;
> +
> +	ENTER;
> +
> +	ibmvfc_toggle_scrq_irq(scrq, 0);
> +	ibmvfc_drain_async_subq(scrq);
> +
> +	LEAVE;
> +
> +	return IRQ_HANDLED;
> +}
> +
>  static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
>  {
>  	struct ibmvfc_crq *crq;
> @@ -5316,6 +5535,8 @@ static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
>  			for (i = 0; i < active_queues; i++)
>  				scrqs->scrqs[i].vios_cookie =
>  					be64_to_cpu(setup->channel_handles[i]);
> +			scrqs->async_scrq->vios_cookie =
> +				be64_to_cpu(setup->asyncSubqHandle);
>  
>  			ibmvfc_dbg(vhost, "Using %u channels\n",
>  				   vhost->scsi_scrqs.active_queues);
> @@ -5366,6 +5587,7 @@ static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
>  		setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels);
>  		for (i = 0; i < num_channels; i++)
>  			setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie);
> +		setup_buf->asyncSubqHandle = cpu_to_be64(scrqs->async_scrq->cookie);
>  	}
>  
>  	ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);
> @@ -5461,6 +5683,8 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
>  	unsigned int npiv_max_sectors;
>  	int level = IBMVFC_DEFAULT_LOG_LEVEL;
>  
> +	ENTER;
> +
>  	switch (mad_status) {
>  	case IBMVFC_MAD_SUCCESS:
>  		ibmvfc_free_event(evt);
> @@ -5540,6 +5764,8 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
>  		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
>  		wake_up(&vhost->work_wait_q);
>  	}
> +
> +	LEAVE;
>  }
>  
>  /**
> @@ -6188,14 +6414,26 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
>  	return retrc;
>  }
>  
> -static int ibmvfc_register_channel(struct ibmvfc_host *vhost,
> -				   struct ibmvfc_channels *channels,
> -				   int index)
> +static inline char *ibmvfc_channel_index(struct ibmvfc_channels *channels,
> +					 struct ibmvfc_queue *scrq,
> +					 char *buf, size_t bufsize)
> +{
> +	if (scrq < channels->scrqs || scrq >= channels->scrqs + channels->active_queues)
> +		strscpy(buf, "async", 6);
> +	else
> +		snprintf(buf, bufsize, "%ld", scrq - channels->scrqs);
> +	return buf;
> +}
> +
> +static int ibmvfc_register_channel_handler(struct ibmvfc_host *vhost,
> +					   struct ibmvfc_channels *channels,
> +					   struct ibmvfc_queue *scrq,
> +					   irq_handler_t irq)
>  {
>  	struct device *dev = vhost->dev;
>  	struct vio_dev *vdev = to_vio_dev(dev);
> -	struct ibmvfc_queue *scrq = &channels->scrqs[index];
>  	int rc = -ENOMEM;
> +	char buf[16];
>  
>  	ENTER;
>  
> @@ -6214,20 +6452,23 @@ static int ibmvfc_register_channel(struct ibmvfc_host *vhost,
>  
>  	if (!scrq->irq) {
>  		rc = -EINVAL;
> -		dev_err(dev, "Error mapping sub-crq[%d] irq\n", index);
> +		dev_err(dev, "Error mapping sub-crq[%s] irq\n",
> +			ibmvfc_channel_index(channels, scrq, buf, sizeof(buf)));
>  		goto irq_failed;
>  	}
>  
>  	switch (channels->protocol) {
>  	case IBMVFC_PROTO_SCSI:
> -		snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
> -			 vdev->unit_address, index);
> -		scrq->handler = ibmvfc_interrupt_mq;
> +		snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%s",
> +			 vdev->unit_address,
> +			 ibmvfc_channel_index(channels, scrq, buf, sizeof(buf)));
> +		scrq->handler = irq;
>  		break;
>  	case IBMVFC_PROTO_NVME:
> -		snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-nvmf%d",
> -			 vdev->unit_address, index);
> -		scrq->handler = ibmvfc_interrupt_mq;
> +		snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-nvmf%s",
> +			 vdev->unit_address,
> +			 ibmvfc_channel_index(channels, scrq, buf, sizeof(buf)));
> +		scrq->handler = irq;
>  		break;
>  	default:
>  		dev_err(dev, "Unknown channel protocol (%d)\n",
> @@ -6238,12 +6479,14 @@ static int ibmvfc_register_channel(struct ibmvfc_host *vhost,
>  	rc = request_irq(scrq->irq, scrq->handler, 0, scrq->name, scrq);
>  
>  	if (rc) {
> -		dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
> +		dev_err(dev, "Couldn't register sub-crq[%s] irq\n",
> +			ibmvfc_channel_index(channels, scrq, buf, sizeof(buf)));
>  		irq_dispose_mapping(scrq->irq);
>  		goto irq_failed;
>  	}
>  
> -	scrq->hwq_id = index;
> +	if (scrq >= channels->scrqs && scrq < channels->scrqs + channels->active_queues)
> +		scrq->hwq_id = scrq - channels->scrqs;
>  
>  	LEAVE;
>  	return 0;
> @@ -6257,13 +6500,21 @@ static int ibmvfc_register_channel(struct ibmvfc_host *vhost,
>  	return rc;
>  }
>  
> +static inline int
> +ibmvfc_register_channel(struct ibmvfc_host *vhost,
> +			struct ibmvfc_channels *channels,
> +			struct ibmvfc_queue *scrq)
> +{
> +	return ibmvfc_register_channel_handler(vhost, channels, scrq, ibmvfc_interrupt_mq);
> +}
> +
>  static void ibmvfc_deregister_channel(struct ibmvfc_host *vhost,
>  				      struct ibmvfc_channels *channels,
> -				      int index)
> +				      struct ibmvfc_queue *scrq)
>  {
>  	struct device *dev = vhost->dev;
>  	struct vio_dev *vdev = to_vio_dev(dev);
> -	struct ibmvfc_queue *scrq = &channels->scrqs[index];
> +	char buf[16];
>  	long rc;
>  
>  	ENTER;
> @@ -6278,7 +6529,8 @@ static void ibmvfc_deregister_channel(struct ibmvfc_host *vhost,
>  	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
>  
>  	if (rc)
> -		dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
> +		dev_err(dev, "Failed to free sub-crq[%s]: rc=%ld\n",
> +			ibmvfc_channel_index(channels, scrq, buf, sizeof(buf)), rc);
>  
>  	/* Clean out the queue */
>  	memset(scrq->msgs.crq, 0, PAGE_SIZE);
> @@ -6296,10 +6548,19 @@ static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost,
>  	if (!vhost->mq_enabled || !channels->scrqs)
>  		return;
>  
> +	if (ibmvfc_register_channel_handler(vhost, channels,
> +					    channels->async_scrq,
> +					    ibmvfc_interrupt_asyncq))
> +		return;
> +
>  	for (i = 0; i < channels->max_queues; i++) {
> -		if (ibmvfc_register_channel(vhost, channels, i)) {
> +		if (ibmvfc_register_channel(vhost, channels, &channels->scrqs[i])) {
>  			for (j = i; j > 0; j--)
> -				ibmvfc_deregister_channel(vhost, channels, j - 1);
> +				ibmvfc_deregister_channel(
> +					vhost, channels, &channels->scrqs[j - 1]);
> +			ibmvfc_deregister_channel(vhost, channels,
> +							channels->async_scrq);
> +
>  			vhost->do_enquiry = 0;
>  			return;
>  		}
> @@ -6318,7 +6579,8 @@ static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost,
>  		return;
>  
>  	for (i = 0; i < channels->max_queues; i++)
> -		ibmvfc_deregister_channel(vhost, channels, i);
> +		ibmvfc_deregister_channel(vhost, channels, &channels->scrqs[i]);
> +	ibmvfc_deregister_channel(vhost, channels, channels->async_scrq);
>  
>  	LEAVE;
>  }
> @@ -6334,6 +6596,21 @@ static int ibmvfc_alloc_channels(struct ibmvfc_host *vhost,
>  	if (!channels->scrqs)
>  		return -ENOMEM;
>  
> +	channels->async_scrq = kzalloc_obj(*channels->async_scrq, GFP_KERNEL);
> +
> +	if (!channels->async_scrq) {
> +		kfree(channels->scrqs);
> +		return -ENOMEM;
> +	}
> +
> +	rc = ibmvfc_alloc_queue(vhost, channels->async_scrq,
> +				IBMVFC_SUB_CRQ_FMT);
> +	if (rc) {
> +		kfree(channels->scrqs);
> +		kfree(channels->async_scrq);
> +		return rc;
> +	}
> +
>  	for (i = 0; i < channels->max_queues; i++) {
>  		scrq = &channels->scrqs[i];
>  		rc = ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT);
> @@ -6345,6 +6622,9 @@ static int ibmvfc_alloc_channels(struct ibmvfc_host *vhost,
>  			kfree(channels->scrqs);
>  			channels->scrqs = NULL;
>  			channels->active_queues = 0;
> +			ibmvfc_free_queue(vhost, channels->async_scrq);
> +			kfree(channels->async_scrq);
> +			channels->async_scrq = NULL;
>  			return rc;
>  		}
>  	}
> @@ -6629,6 +6909,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
>  	vhost->using_channels = 0;
>  	vhost->do_enquiry = 1;
>  	vhost->scan_timeout = 0;
> +	vhost->login_cap_index = 0;
>  
>  	strcpy(vhost->partition_name, "UNKNOWN");
>  	init_waitqueue_head(&vhost->work_wait_q);
> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
> index 4f680c5d9558..b9f22613d144 100644
> --- a/drivers/scsi/ibmvscsi/ibmvfc.h
> +++ b/drivers/scsi/ibmvscsi/ibmvfc.h
> @@ -182,6 +182,9 @@ struct ibmvfc_npiv_login {
>  #define IBMVFC_CAN_HANDLE_FPIN		0x04
>  #define IBMVFC_CAN_USE_MAD_VERSION	0x08
>  #define IBMVFC_CAN_SEND_VF_WWPN		0x10
> +#define IBMVFC_YES_NVMEOF		0x20
> +#define IBMVFC_YES_SCSI			0x40
> +#define IBMVFC_USE_ASYNC_SUBQ		0x100
>  #define IBMVFC_CAN_USE_NOOP_CMD		0x200
>  	__be64 node_name;
>  	struct srp_direct_buf async;
> @@ -231,6 +234,7 @@ struct ibmvfc_npiv_login_resp {
>  #define IBMVFC_CAN_SUPPORT_CHANNELS	0x80
>  #define IBMVFC_SUPPORT_NVMEOF		0x100
>  #define IBMVFC_SUPPORT_SCSI		0x200
> +#define IBMVFC_SUPPORT_ASYNC_SUBQ	0x800
>  #define IBMVFC_SUPPORT_NOOP_CMD		0x1000
>  	__be32 max_cmds;
>  	__be32 scsi_id_sz;
> @@ -565,7 +569,7 @@ struct ibmvfc_channel_setup_mad {
>  	struct srp_direct_buf buffer;
>  } __packed __aligned(8);
>  
> -#define IBMVFC_MAX_CHANNELS	502
> +#define IBMVFC_MAX_CHANNELS	501
>  
>  struct ibmvfc_channel_setup {
>  	__be32 flags;
> @@ -580,6 +584,7 @@ struct ibmvfc_channel_setup {
>  	struct srp_direct_buf buffer;
>  	__be64 reserved2[5];
>  	__be64 channel_handles[IBMVFC_MAX_CHANNELS];
> +	__be64 asyncSubqHandle;
>  } __packed __aligned(8);
>  
>  struct ibmvfc_connection_info {
> @@ -710,6 +715,25 @@ struct ibmvfc_async_crq {
>  	__be64 reserved;
>  } __packed __aligned(8);
>  
> +struct ibmvfc_async_subq {
> +	volatile u8 valid;
> +#define IBMVFC_ASYNC_ID_IS_ASSOC_ID	0x01
> +#define IBMVFC_FC_EEH			0x04
> +#define IBMVFC_FC_FW_UPDATE		0x08
> +#define IBMVFC_FC_FW_DUMP		0x10
> +	u8 flags;
> +	u8 link_state;
> +	u8 fpin_status;
> +	__be16 event;
> +	__be16 pad;
> +	volatile __be64 wwpn;
> +	volatile __be64 nport_id;
> +	union {
> +		__be64 node_name;
> +		__be64 assoc_id;
> +	} id;
> +} __packed __aligned(8);
> +
>  union ibmvfc_iu {
>  	struct ibmvfc_mad_common mad_common;
>  	struct ibmvfc_npiv_login_mad npiv_login;
> @@ -849,6 +873,7 @@ struct ibmvfc_queue {
>  
>  struct ibmvfc_channels {
>  	struct ibmvfc_queue *scrqs;
> +	struct ibmvfc_queue *async_scrq;
>  	enum ibmvfc_protocol protocol;
>  	unsigned int active_queues;
>  	unsigned int desired_queues;
> @@ -989,6 +1014,8 @@ static inline int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap
>  
>  #ifdef VISIBLE_IF_KUNIT
>  VISIBLE_IF_KUNIT void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, struct ibmvfc_host *vhost);
> +VISIBLE_IF_KUNIT void ibmvfc_handle_asyncq(struct ibmvfc_crq *crq_instance,
> +					   struct ibmvfc_host *vhost, struct list_head *evt_doneq);
>  VISIBLE_IF_KUNIT struct list_head *ibmvfc_get_headp(void);
>  VISIBLE_IF_KUNIT void ibmvfc_handle_crq(struct ibmvfc_crq *crq,
>  					struct ibmvfc_host *vhost,
> diff --git a/drivers/scsi/ibmvscsi/ibmvfc_kunit.c b/drivers/scsi/ibmvscsi/ibmvfc_kunit.c
> index 3359e4ebebe2..3a41127c4e81 100644
> --- a/drivers/scsi/ibmvscsi/ibmvfc_kunit.c
> +++ b/drivers/scsi/ibmvscsi/ibmvfc_kunit.c
> @@ -22,14 +22,14 @@ MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
>  static void ibmvfc_handle_fpin_event_test(struct kunit *test)
>  {
>  	u64 *stats[IBMVFC_AE_FPIN_CONGESTION_CLEARED + 1] = { NULL };
> -	u64 post[IBMVFC_AE_FPIN_CONGESTION_CLEARED + 1];
> -	u64 pre[IBMVFC_AE_FPIN_CONGESTION_CLEARED + 1];
>  	enum ibmvfc_ae_fpin_status fs;
> -	struct ibmvfc_async_crq crq;
> +	struct ibmvfc_async_subq crq;
>  	struct ibmvfc_target *tgt;
>  	struct ibmvfc_host *vhost;
>  	struct list_head *queue;
>  	struct list_head *headp;
> +	LIST_HEAD(evt_doneq);
> +	u64 pre, post;
>  
>  
>  	headp = ibmvfc_get_headp();
> @@ -52,31 +52,23 @@ static void ibmvfc_handle_fpin_event_test(struct kunit *test)
>  		crq.valid = 0x80;
>  		crq.link_state = IBMVFC_AE_LS_LINK_UP;
>  		crq.fpin_status = fs;
> -		crq.event = cpu_to_be64(IBMVFC_AE_FPIN);
> -		crq.scsi_id = cpu_to_be64(tgt->scsi_id);
> +		crq.event = cpu_to_be16(IBMVFC_AE_FPIN);
>  		crq.wwpn = cpu_to_be64(tgt->wwpn);
> -		crq.node_name = cpu_to_be64(tgt->ids.node_name);
> -		pre[fs] = *stats[fs];
> -		ibmvfc_handle_async(&crq, vhost);
> -		post[fs] = *stats[fs];
> -		KUNIT_EXPECT_EQ(test, post[fs], pre[fs]+1);
> +		crq.id.node_name = cpu_to_be64(tgt->ids.node_name);
> +		pre = *stats[fs];
> +		ibmvfc_handle_asyncq((struct ibmvfc_crq *)&crq, vhost, &evt_doneq);
> +		post = *stats[fs];
> +		KUNIT_EXPECT_EQ(test, post, pre+1);
>  	}
>  
>  	/* bad path */
> -	for (fs = IBMVFC_AE_FPIN_LINK_CONGESTED; fs <= IBMVFC_AE_FPIN_CONGESTION_CLEARED; fs++)
> -		pre[fs] = *stats[fs];
>  	crq.valid = 0x80;
>  	crq.link_state = IBMVFC_AE_LS_LINK_UP;
>  	crq.fpin_status = 0; /* bad value */
> -	crq.event = cpu_to_be64(IBMVFC_AE_FPIN);
> -	crq.scsi_id = cpu_to_be64(tgt->scsi_id);
> +	crq.event = cpu_to_be16(IBMVFC_AE_FPIN);
>  	crq.wwpn = cpu_to_be64(tgt->wwpn);
> -	crq.node_name = cpu_to_be64(tgt->ids.node_name);
> -	ibmvfc_handle_async(&crq, vhost);
> -	for (fs = IBMVFC_AE_FPIN_LINK_CONGESTED; fs <= IBMVFC_AE_FPIN_CONGESTION_CLEARED; fs++) {
> -		post[fs] = *stats[fs];
> -		KUNIT_EXPECT_EQ(test, pre[fs], post[fs]);
> -	}
> +	crq.id.node_name = cpu_to_be64(tgt->ids.node_name);
> +	ibmvfc_handle_asyncq((struct ibmvfc_crq *)&crq, vhost, &evt_doneq);
>  }
>  
>  /**
> @@ -105,9 +97,29 @@ static void ibmvfc_noop_test(struct kunit *test)
>  	ibmvfc_handle_crq(&crq, vhost, &evtq);
>  }
>  
> +/**
> + * ibmvfc_async_subq_test - unit test for allocating async subqueue
> + * @test: pointer to kunit structure
> + *
> + * Return: void
> + */
> +static void ibmvfc_async_subq_test(struct kunit *test)
> +{
> +	struct ibmvfc_host *vhost;
> +	struct list_head *queue;
> +	struct list_head *headp;
> +
> +	headp = ibmvfc_get_headp();
> +	queue = headp->next;
> +	vhost = container_of(queue, struct ibmvfc_host, queue);
> +
> +	KUNIT_EXPECT_NOT_NULL(test, vhost->scsi_scrqs.async_scrq);
> +}
> +
>  static struct kunit_case ibmvfc_fpin_test_cases[] = {
>  	KUNIT_CASE(ibmvfc_handle_fpin_event_test),
>  	KUNIT_CASE(ibmvfc_noop_test),
> +	KUNIT_CASE(ibmvfc_async_subq_test),
>  	{},
>  };
>  
> 


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 5/5] ibmvfc: handle extended FPIN events
       [not found] ` <20260408-ibmvfc-fpin-support-v1-5-52b06c464e03@linux.ibm.com>
@ 2026-05-07  5:48   ` Tyrel Datwyler
  2026-05-08 14:38     ` Dave Marquardt
  0 siblings, 1 reply; 11+ messages in thread
From: Tyrel Datwyler @ 2026-05-07  5:48 UTC (permalink / raw)
  To: davemarq, James E.J. Bottomley, Martin K. Petersen,
	Madhavan Srinivasan, Michael Ellerman, Nicholas Piggin,
	Christophe Leroy (CS GROUP)
  Cc: linux-kernel, linux-scsi, linuxppc-dev, Brian King, Greg Joyce,
	Kyle Mahlkuch

On 4/8/26 10:07 AM, Dave Marquardt via B4 Relay wrote:
> From: Dave Marquardt <davemarq@linux.ibm.com>
> 
> - negotiate use of extended FPIN events with NPIV (VIOS)
> - add code to parse and handle extended FPIN events
> - add KUnit test to test extended FPIN event handling

Same nit here as the previous 4 patches.

> ---
>  drivers/scsi/ibmvscsi/ibmvfc.c       | 45 ++++++++++++++---
>  drivers/scsi/ibmvscsi/ibmvfc.h       | 31 ++++++++++++
>  drivers/scsi/ibmvscsi/ibmvfc_kunit.c | 97 +++++++++++++++++++++++++++++++++---
>  3 files changed, 161 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
> index 26e39b367022..5b2b861a34c2 100644
> --- a/drivers/scsi/ibmvscsi/ibmvfc.c
> +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
> @@ -1472,6 +1472,9 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
>  }
>  
>  static __be64 ibmvfc_npiv_chan_caps[] = {
> +	cpu_to_be64(IBMVFC_CAN_USE_CHANNELS | IBMVFC_USE_ASYNC_SUBQ |
> +		    IBMVFC_YES_SCSI | IBMVFC_CAN_HANDLE_FPIN |
> +		    IBMVFC_CAN_HANDLE_FPIN_EXT),
>  	cpu_to_be64(IBMVFC_CAN_USE_CHANNELS | IBMVFC_USE_ASYNC_SUBQ |
>  		    IBMVFC_YES_SCSI | IBMVFC_CAN_HANDLE_FPIN),
>  	cpu_to_be64(IBMVFC_CAN_USE_CHANNELS),
> @@ -3370,6 +3373,28 @@ ibmvfc_full_fpin_to_desc(struct ibmvfc_async_subq *ibmvfc_fpin)
>  					  cpu_to_be32(1));
>  }
>  
> +/**
> + * ibmvfc_ext_fpin_to_desc(): allocate and populate a struct fc_els_fpin struct
> + * containing a descriptor.
> + * @ibmvfc_fpin: Pointer to async subq FPIN data
> + *
> + * Allocate a struct fc_els_fpin containing a descriptor and populate
> + * based on data from *ibmvfc_fpin.
> + *
> + * Return:
> + * NULL     - unable to allocate structure
> + * non-NULL - pointer to populated struct fc_els_fpin
> + */
> +static struct fc_els_fpin *
> +ibmvfc_ext_fpin_to_desc(struct ibmvfc_async_subq_fpin *ibmvfc_fpin)
> +{
> +	return ibmvfc_common_fpin_to_desc(ibmvfc_fpin->fpin_status, ibmvfc_fpin->wwpn,
> +					  ibmvfc_fpin->fpin_data.event_type_modifier,
> +					  ibmvfc_fpin->fpin_data.event_threshold,
> +					  ibmvfc_fpin->fpin_data.event_threshold,

I see mention of threshold and period previously. Why in this case is it just
the threshold value passed for both?

-Tyrel

> +					  ibmvfc_fpin->fpin_data.event_data.event_count);
> +}
> +
>  /**
>   * ibmvfc_handle_async - Handle an async event from the adapter
>   * @crq:	crq to process
> @@ -3491,10 +3516,12 @@ VISIBLE_IF_KUNIT void ibmvfc_handle_asyncq(struct ibmvfc_crq *crq_instance,
>  					   struct ibmvfc_host *vhost,
>  					   struct list_head *evt_doneq)
>  {
> +	struct ibmvfc_async_subq_fpin *sqfpin = (struct ibmvfc_async_subq_fpin *)crq_instance;
>  	struct ibmvfc_async_subq *crq = (struct ibmvfc_async_subq *)crq_instance;
>  	const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be16_to_cpu(crq->event));
>  	struct ibmvfc_target *tgt;
>  	struct fc_els_fpin *fpin;
> +	u8 fpin_status;
>  
>  	ibmvfc_log(vhost, desc->log_level,
>  		   "%s event received. wwpn: %llx, node_name: %llx%s event 0x%x\n",
> @@ -3582,7 +3609,17 @@ VISIBLE_IF_KUNIT void ibmvfc_handle_asyncq(struct ibmvfc_crq *crq_instance,
>  				continue;
>  			if (!tgt->rport)
>  				continue;
> -			fpin = ibmvfc_full_fpin_to_desc(crq);
> +			if ((crq->flags & IBMVFC_ASYNC_IS_FPIN_EXT) == 0) {
> +				fpin = ibmvfc_full_fpin_to_desc(crq);
> +				fpin_status = crq->fpin_status;
> +			} else if (!(sqfpin->fpin_data.flags & IBMVFC_FPIN_EVENT_TYPE_VALID))
> +				dev_err(vhost->dev, "Invalid extended FPIN event received");
> +			else if (!ibmvfc_check_caps(vhost, IBMVFC_SUPPORT_FPIN_EXT))
> +				dev_err(vhost->dev, "Unexpected extended FPIN event received");
> +			else {
> +				fpin = ibmvfc_ext_fpin_to_desc(sqfpin);
> +				fpin_status = sqfpin->fpin_status;
> +			}
>  			if (fpin) {
>  				fc_host_fpin_rcv(tgt->vhost->host,
>  						 sizeof(*fpin) + be32_to_cpu(fpin->desc_len),
> @@ -3591,12 +3628,8 @@ VISIBLE_IF_KUNIT void ibmvfc_handle_asyncq(struct ibmvfc_crq *crq_instance,
>  			} else
>  				dev_err(vhost->dev,
>  					"FPIN event %u received, unable to process\n",
> -					crq->fpin_status);
> +					fpin_status);
>  		}
> -		break;
> -	default:
> -		dev_err(vhost->dev, "Unknown async event received: %d\n", crq->event);
> -		break;
>  	}
>  }
>  EXPORT_SYMBOL_IF_KUNIT(ibmvfc_handle_asyncq);
> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
> index b9f22613d144..c67db8d7b3fc 100644
> --- a/drivers/scsi/ibmvscsi/ibmvfc.h
> +++ b/drivers/scsi/ibmvscsi/ibmvfc.h
> @@ -186,6 +186,7 @@ struct ibmvfc_npiv_login {
>  #define IBMVFC_YES_SCSI			0x40
>  #define IBMVFC_USE_ASYNC_SUBQ		0x100
>  #define IBMVFC_CAN_USE_NOOP_CMD		0x200
> +#define IBMVFC_CAN_HANDLE_FPIN_EXT	0x800
>  	__be64 node_name;
>  	struct srp_direct_buf async;
>  	u8 partition_name[IBMVFC_MAX_NAME];
> @@ -236,6 +237,7 @@ struct ibmvfc_npiv_login_resp {
>  #define IBMVFC_SUPPORT_SCSI		0x200
>  #define IBMVFC_SUPPORT_ASYNC_SUBQ	0x800
>  #define IBMVFC_SUPPORT_NOOP_CMD		0x1000
> +#define IBMVFC_SUPPORT_FPIN_EXT		0x2000
>  	__be32 max_cmds;
>  	__be32 scsi_id_sz;
>  	__be64 max_dma_len;
> @@ -718,6 +720,7 @@ struct ibmvfc_async_crq {
>  struct ibmvfc_async_subq {
>  	volatile u8 valid;
>  #define IBMVFC_ASYNC_ID_IS_ASSOC_ID	0x01
> +#define IBMVFC_ASYNC_IS_FPIN_EXT	0x02
>  #define IBMVFC_FC_EEH			0x04
>  #define IBMVFC_FC_FW_UPDATE		0x08
>  #define IBMVFC_FC_FW_DUMP		0x10
> @@ -734,6 +737,34 @@ struct ibmvfc_async_subq {
>  	} id;
>  } __packed __aligned(8);
>  
> +struct ibmvfc_fpin_data {
> +#define IBMVFC_FPIN_EVENT_TYPE_VALID	0x01
> +#define IBMVFC_FPIN_MODIFIER_VALID	0x02
> +#define IBMVFC_FPIN_THRESHOLD_VALID	0x04
> +#define IBMVFC_FPIN_SEVERITY_VALID	0x08
> +#define IBMVFC_FPIN_EVENT_COUNT_VALID	0x10
> +	u8 flags;
> +	u8 reserved[3];
> +	__be16 event_type;
> +	__be16 event_type_modifier;
> +	__be32 event_threshold;
> +	union {
> +		u8 severity;
> +		__be32 event_count;
> +	} event_data;
> +} __packed __aligned(8);
> +
> +struct ibmvfc_async_subq_fpin {
> +	volatile u8 valid;
> +	u8 flags;
> +	u8 link_state;
> +	u8 fpin_status;
> +	__be16 event;
> +	__be16 pad;
> +	volatile __be64 wwpn;
> +	struct ibmvfc_fpin_data fpin_data;
> +} __packed __aligned(8);
> +
>  union ibmvfc_iu {
>  	struct ibmvfc_mad_common mad_common;
>  	struct ibmvfc_npiv_login_mad npiv_login;
> diff --git a/drivers/scsi/ibmvscsi/ibmvfc_kunit.c b/drivers/scsi/ibmvscsi/ibmvfc_kunit.c
> index 3a41127c4e81..6c2a7d6f8042 100644
> --- a/drivers/scsi/ibmvscsi/ibmvfc_kunit.c
> +++ b/drivers/scsi/ibmvscsi/ibmvfc_kunit.c
> @@ -3,6 +3,7 @@
>  #include <kunit/visibility.h>
>  #include <scsi/scsi_device.h>
>  #include <scsi/scsi_transport_fc.h>
> +#include <scsi/fc/fc_els.h>
>  #include <linux/list.h>
>  #include "ibmvfc.h"
>  
> @@ -71,6 +72,25 @@ static void ibmvfc_handle_fpin_event_test(struct kunit *test)
>  	ibmvfc_handle_asyncq((struct ibmvfc_crq *)&crq, vhost, &evt_doneq);
>  }
>  
> +/**
> + * ibmvfc_async_subq_test - unit test for allocating async subqueue
> + * @test: pointer to kunit structure
> + *
> + * Return: void
> + */
> +static void ibmvfc_async_subq_test(struct kunit *test)
> +{
> +	struct ibmvfc_host *vhost;
> +	struct list_head *queue;
> +	struct list_head *headp;
> +
> +	headp = ibmvfc_get_headp();
> +	queue = headp->next;
> +	vhost = container_of(queue, struct ibmvfc_host, queue);
> +
> +	KUNIT_EXPECT_NOT_NULL(test, vhost->scsi_scrqs.async_scrq);
> +}
> +
>  /**
>   * ibmvfc_noop_test - unit test for VFC_NOOP command
>   * @test: pointer to kunit structure
> @@ -97,29 +117,94 @@ static void ibmvfc_noop_test(struct kunit *test)
>  	ibmvfc_handle_crq(&crq, vhost, &evtq);
>  }
>  
> +#define IBMVFC_TEST_FPIN_EXT(fs, ev, stat) {			\
> +	crq.valid = 0x80;					\
> +	crq.flags = IBMVFC_ASYNC_IS_FPIN_EXT;			\
> +	crq.link_state = IBMVFC_AE_LS_LINK_UP;			\
> +	crq.fpin_status = (fs);					\
> +	crq.event = cpu_to_be16(IBMVFC_AE_FPIN);		\
> +	crq.wwpn = cpu_to_be64(tgt->wwpn);			\
> +	crq.fpin_data.flags = IBMVFC_FPIN_EVENT_TYPE_VALID;	\
> +	crq.fpin_data.event_type = cpu_to_be16((ev));		\
> +	pre = READ_ONCE(tgt->rport->fpin_stats.stat);		\
> +	ibmvfc_handle_asyncq((struct ibmvfc_crq *)&crq, vhost,	\
> +			     &evt_doneq);			\
> +	post = READ_ONCE(tgt->rport->fpin_stats.stat);		\
> +}
> +
>  /**
> - * ibmvfc_async_subq_test - unit test for allocating async subqueue
> + * ibmvfc_extended_fpin_test - unit test for extended FPIN events
>   * @test: pointer to kunit structure
>   *
> + * Tests
> + *
>   * Return: void
>   */
> -static void ibmvfc_async_subq_test(struct kunit *test)
> +static void ibmvfc_extended_fpin_test(struct kunit *test)
>  {
> +	enum ibmvfc_ae_fpin_status fs;
> +	struct ibmvfc_async_subq_fpin crq;
> +	struct fc_fpin_stats stats;
> +	struct ibmvfc_target *tgt;
>  	struct ibmvfc_host *vhost;
> -	struct list_head *queue;
>  	struct list_head *headp;
> +	LIST_HEAD(evt_doneq);
> +	u64 pre, post;
>  
>  	headp = ibmvfc_get_headp();
> -	queue = headp->next;
> -	vhost = container_of(queue, struct ibmvfc_host, queue);
> +	vhost = list_first_entry(headp, struct ibmvfc_host, queue);
> +	KUNIT_ASSERT_NOT_NULL_MSG(test, vhost, "No vhost");
>  
> -	KUNIT_EXPECT_NOT_NULL(test, vhost->scsi_scrqs.async_scrq);
> +	KUNIT_ASSERT_GE(test, vhost->num_targets, 1);
> +	tgt = list_first_entry(&vhost->targets, struct ibmvfc_target, queue);
> +	KUNIT_ASSERT_NOT_NULL(test, tgt->rport);
> +
> +	stats = tgt->rport->fpin_stats;
> +
> +	for (fs = IBMVFC_AE_FPIN_LINK_CONGESTED; fs <= IBMVFC_AE_FPIN_CONGESTION_CLEARED; fs++) {
> +		switch (fs) {
> +		case IBMVFC_AE_FPIN_PORT_CLEARED:
> +		case IBMVFC_AE_FPIN_CONGESTION_CLEARED:
> +			crq.valid = 0x80;
> +			crq.flags = IBMVFC_ASYNC_IS_FPIN_EXT;
> +			crq.link_state = IBMVFC_AE_LS_LINK_UP;
> +			crq.fpin_status = fs;
> +			crq.event = cpu_to_be16(IBMVFC_AE_FPIN);
> +			crq.wwpn = cpu_to_be64(tgt->wwpn);
> +			crq.fpin_data.flags = IBMVFC_FPIN_EVENT_TYPE_VALID;
> +			crq.fpin_data.event_type = cpu_to_be16(0);
> +			pre = READ_ONCE(tgt->rport->fpin_stats.cn_clear);
> +			ibmvfc_handle_asyncq((struct ibmvfc_crq *)&crq, vhost,
> +					     &evt_doneq);
> +			post = READ_ONCE(tgt->rport->fpin_stats.cn_clear);
> +			break;
> +		case IBMVFC_AE_FPIN_LINK_CONGESTED:
> +		case IBMVFC_AE_FPIN_PORT_CONGESTED:
> +			IBMVFC_TEST_FPIN_EXT(fs, FPIN_CONGN_CLEAR, cn_clear);
> +			IBMVFC_TEST_FPIN_EXT(fs, FPIN_CONGN_LOST_CREDIT, cn_lost_credit);
> +			IBMVFC_TEST_FPIN_EXT(fs, FPIN_CONGN_CREDIT_STALL, cn_credit_stall);
> +			IBMVFC_TEST_FPIN_EXT(fs, FPIN_CONGN_OVERSUBSCRIPTION, cn_oversubscription);
> +			IBMVFC_TEST_FPIN_EXT(fs, FPIN_CONGN_DEVICE_SPEC, cn_device_specific);
> +			break;
> +		case IBMVFC_AE_FPIN_PORT_DEGRADED:
> +			IBMVFC_TEST_FPIN_EXT(fs, FPIN_LI_UNKNOWN, li_failure_unknown);
> +			IBMVFC_TEST_FPIN_EXT(fs, FPIN_LI_LINK_FAILURE, li_link_failure_count);
> +			IBMVFC_TEST_FPIN_EXT(fs, FPIN_LI_LOSS_OF_SYNC, li_loss_of_sync_count);
> +			IBMVFC_TEST_FPIN_EXT(fs, FPIN_LI_LOSS_OF_SIG, li_loss_of_signals_count);
> +			IBMVFC_TEST_FPIN_EXT(fs, FPIN_LI_PRIM_SEQ_ERR, li_prim_seq_err_count);
> +			IBMVFC_TEST_FPIN_EXT(fs, FPIN_LI_INVALID_TX_WD, li_invalid_tx_word_count);
> +			IBMVFC_TEST_FPIN_EXT(fs, FPIN_LI_INVALID_CRC, li_invalid_crc_count);
> +			IBMVFC_TEST_FPIN_EXT(fs, FPIN_LI_DEVICE_SPEC, li_device_specific);
> +			break;
> +		}
> +	}
>  }
>  
>  static struct kunit_case ibmvfc_fpin_test_cases[] = {
>  	KUNIT_CASE(ibmvfc_handle_fpin_event_test),
>  	KUNIT_CASE(ibmvfc_noop_test),
>  	KUNIT_CASE(ibmvfc_async_subq_test),
> +	KUNIT_CASE(ibmvfc_extended_fpin_test),
>  	{},
>  };
>  
> 


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 0/5] ibmvfc: make ibmvfc support FPIN messages
       [not found] ` <yq1a4uke7rz.fsf@ca-mkp.ca.oracle.com>
@ 2026-05-07 22:15   ` Dave Marquardt
  0 siblings, 0 replies; 11+ messages in thread
From: Dave Marquardt @ 2026-05-07 22:15 UTC (permalink / raw)
  To: Martin K. Petersen
  Cc: Dave Marquardt via B4 Relay, James E.J. Bottomley,
	Madhavan Srinivasan, Michael Ellerman, Nicholas Piggin,
	Christophe Leroy (CS GROUP), Tyrel Datwyler, linux-kernel,
	linux-scsi, linuxppc-dev, Brian King, Greg Joyce, Kyle Mahlkuch

"Martin K. Petersen" <martin.petersen@oracle.com> writes:

> Dave,
>
>> This patch series adds FPIN (fabric performance impact notification)
>> support to the ibmvfc (IBM Virtual Fibre Channel) driver. This comes
>> in three flavors:
>
> https://sashiko.dev/#/patchset/20260408-ibmvfc-fpin-support-v1-0-52b06c464e03%40linux.ibm.com

Thanks for this. I'm working through the comments and fixing things up
before sending out a v2 patch series.

-Dave

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/5] ibmvfc: add basic FPIN support
  2026-05-07  4:12   ` [PATCH 1/5] ibmvfc: add basic FPIN support Tyrel Datwyler
@ 2026-05-07 22:22     ` Dave Marquardt
  0 siblings, 0 replies; 11+ messages in thread
From: Dave Marquardt @ 2026-05-07 22:22 UTC (permalink / raw)
  To: Tyrel Datwyler
  Cc: James E.J. Bottomley, Martin K. Petersen, Madhavan Srinivasan,
	Michael Ellerman, Nicholas Piggin, Christophe Leroy (CS GROUP),
	linux-kernel, linux-scsi, linuxppc-dev, Brian King, Greg Joyce,
	Kyle Mahlkuch

Tyrel Datwyler <tyreld@linux.ibm.com> writes:

> On 4/8/26 10:07 AM, Dave Marquardt via B4 Relay wrote:
>> From: Dave Marquardt <davemarq@linux.ibm.com>
>> 
>> - Add FPIN event descriptor
>> - Add congestion cleared status
>> - Add code to handle basic FPIN async event
>> - Add KUnit tests
>
> You need a more detailed description of your changes here for the commit log body.
>
> You will also need a signed off tag from yourself for this to even be merged.

Odd. I used b4 to prepare the patch set, and it should have added a
Signed-off-by: tag. I'll double check it next round.

> https://www.kernel.org/doc/html/latest/process/submitting-patches.html
>
>> ---
>>  drivers/scsi/Kconfig                 |  10 ++
>>  drivers/scsi/ibmvscsi/Makefile       |   1 +
>>  drivers/scsi/ibmvscsi/ibmvfc.c       | 189 ++++++++++++++++++++++++++++++++++-
>>  drivers/scsi/ibmvscsi/ibmvfc.h       |   9 ++
>>  drivers/scsi/ibmvscsi/ibmvfc_kunit.c |  95 ++++++++++++++++++
>>  5 files changed, 302 insertions(+), 2 deletions(-)
>
> <snip>
>
>> +static struct fc_els_fpin *
>> +ibmvfc_common_fpin_to_desc(u8 fpin_status, __be64 wwpn, __be16 modifier,
>> +			   __be32 period, __be32 threshold, __be32 event_count)
>> +{
>> +	struct fc_fn_peer_congn_desc *pdesc;
>> +	struct fc_fn_congn_desc *cdesc;
>> +	struct fc_fn_li_desc *ldesc;
>> +	struct fc_els_fpin *fpin;
>> +	size_t size;
>> +
>> +	size = ibmvfc_fpin_size_helper(fpin_status);
>> +	if (size == 0)
>> +		return NULL;
>> +
>> +	fpin = kzalloc(size, GFP_KERNEL);
>
> This appears to be called by ibmvfc_handle_async() which runs in atomic context
> and cannot therefore sleep. This allocation needs to be GFP_ATOMIC. Although
> there is another issue below that might make this moot.

Noted. As to the problem below, once this is moved to running in a work
queue worker thread, this can stay as is.

>> +	if (fpin == NULL)
>> +		return NULL;
>> +
>> +	fpin->fpin_cmd = ELS_FPIN;
>> +
>> +	switch (fpin_status) {
>> +	case IBMVFC_AE_FPIN_CONGESTION_CLEARED:
>> +	case IBMVFC_AE_FPIN_LINK_CONGESTED:
>> +		fpin->desc_len = cpu_to_be32(sizeof(struct fc_fn_congn_desc));
>> +		cdesc = (struct fc_fn_congn_desc *)fpin->fpin_desc;
>> +		cdesc->desc_tag = cpu_to_be32(ELS_DTAG_CONGESTION);
>> +		cdesc->desc_len = cpu_to_be32(FC_TLV_DESC_LENGTH_FROM_SZ(*cdesc));
>> +		if (fpin_status == IBMVFC_AE_FPIN_CONGESTION_CLEARED)
>> +			cdesc->event_type = cpu_to_be16(FPIN_CONGN_CLEAR);
>> +		else
>> +			cdesc->event_type = cpu_to_be16(FPIN_CONGN_DEVICE_SPEC);
>> +		cdesc->event_modifier = modifier;
>> +		cdesc->event_period = period;
>> +		cdesc->severity = FPIN_CONGN_SEVERITY_WARNING;
>> +		break;
>> +	case IBMVFC_AE_FPIN_PORT_CONGESTED:
>> +	case IBMVFC_AE_FPIN_PORT_CLEARED:
>> +		fpin->desc_len = cpu_to_be32(sizeof(struct fc_fn_peer_congn_desc));
>> +		pdesc = (struct fc_fn_peer_congn_desc *)fpin->fpin_desc;
>> +		pdesc->desc_tag = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
>> +		pdesc->desc_len = cpu_to_be32(FC_TLV_DESC_LENGTH_FROM_SZ(*pdesc));
>> +		if (fpin_status == IBMVFC_AE_FPIN_PORT_CLEARED)
>> +			pdesc->event_type = cpu_to_be16(FPIN_CONGN_CLEAR);
>> +		else
>> +			pdesc->event_type = cpu_to_be16(FPIN_CONGN_DEVICE_SPEC);
>> +		pdesc->event_modifier = modifier;
>> +		pdesc->event_period = period;
>> +		pdesc->detecting_wwpn = cpu_to_be64(0);
>> +		pdesc->attached_wwpn = wwpn;
>> +		pdesc->pname_count = cpu_to_be32(1);
>> +		pdesc->pname_list[0] = wwpn;
>> +		break;
>> +	case IBMVFC_AE_FPIN_PORT_DEGRADED:
>> +		fpin->desc_len = cpu_to_be32(sizeof(struct fc_fn_li_desc));
>> +		ldesc = (struct fc_fn_li_desc *)fpin->fpin_desc;
>> +		ldesc->desc_tag = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
>> +		ldesc->desc_len = cpu_to_be32(FC_TLV_DESC_LENGTH_FROM_SZ(*ldesc));
>> +		ldesc->event_type = cpu_to_be16(FPIN_LI_UNKNOWN);
>> +		ldesc->event_modifier = modifier;
>> +		ldesc->event_threshold = threshold;
>> +		ldesc->event_count = event_count;
>> +		ldesc->detecting_wwpn = cpu_to_be64(0);
>> +		ldesc->attached_wwpn = wwpn;
>> +		ldesc->pname_count = cpu_to_be32(1);
>> +		ldesc->pname_list[0] = wwpn;
>> +		break;
>> +	default:
>> +		/* This should be caught above. */
>> +		kfree(fpin);
>> +		fpin = NULL;
>> +		break;
>> +	}
>> +
>> +	return fpin;
>> +}
>> +
>> +/**
>> + * ibmvfc_basic_fpin_to_desc(): allocate and populate a struct fc_els_fpin struct
>> + * containing a descriptor.
>> + * @ibmvfc_fpin: Pointer to async crq
>> + *
>> + * Allocate a struct fc_els_fpin containing a descriptor and populate
>> + * based on data from *ibmvfc_fpin.
>> + *
>> + * Return:
>> + * NULL     - unable to allocate structure
>> + * non-NULL - pointer to populated struct fc_els_fpin
>> + */
>> +static struct fc_els_fpin *
>> +/*XXX*/ibmvfc_basic_fpin_to_desc(struct ibmvfc_async_crq *crq)
>
> What is with this /*XXX*/? I can't find it once I apply the patchset so I assume
> it's removed in a later patch, but it should be removed here.

An artifact I missed in squashing commits. Thanks for pointing it out.

>> +{
>> +	return ibmvfc_common_fpin_to_desc(crq->fpin_status, crq->wwpn,
>> +					  cpu_to_be16(0),
>> +					  cpu_to_be32(IBMVFC_FPIN_DEFAULT_EVENT_PERIOD),
>> +					  cpu_to_be32(IBMVFC_FPIN_DEFAULT_EVENT_THRESHOLD),
>> +					  cpu_to_be32(1));
>> +}
>> +
>>  /**
>>   * ibmvfc_handle_async - Handle an async event from the adapter
>>   * @crq:	crq to process
>>   * @vhost:	ibmvfc host struct
>>   *
>>   **/
>> -static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
>> -				struct ibmvfc_host *vhost)
>> +VISIBLE_IF_KUNIT void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
>> +					  struct ibmvfc_host *vhost)
>>  {
>>  	const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
>>  	struct ibmvfc_target *tgt;
>> +	struct fc_els_fpin *fpin;
>>  
>>  	ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
>>  		   " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
>> @@ -3269,11 +3422,37 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
>>  	case IBMVFC_AE_HALT:
>>  		ibmvfc_link_down(vhost, IBMVFC_HALTED);
>>  		break;
>> +	case IBMVFC_AE_FPIN:
>> +		if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
>> +			break;
>> +		list_for_each_entry(tgt, &vhost->targets, queue) {
>> +			if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
>> +				continue;
>> +			if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
>> +				continue;
>> +			if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
>> +				continue;
>> +			if (!tgt->rport)
>> +				continue;
>> +			fpin = ibmvfc_basic_fpin_to_desc(crq);
>> +			if (fpin) {
>> +				fc_host_fpin_rcv(tgt->vhost->host,
>> +						 sizeof(*fpin) +
>> +						       be32_to_cpu(fpin->desc_len),
>> +						 (char *)fpin, 0);
>
> This call to fc_host_fpin_rcv() appears to be problematic as it assumes no locks
> are held, but ibmvfc_handle_async() is called with the scsi host lock held. We
> already do a lot more work than we probably should in our interrupt handler. I
> think we maybe need to pass the FPIN work off to a workqueue to be
> handled in process context instead.

Agreed. I'm working on adding a work queue for offloading this FPIN
processing.

-Dave

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 2/5] ibmvfc: Add NOOP command support
  2026-05-07  4:17   ` [PATCH 2/5] ibmvfc: Add NOOP command support Tyrel Datwyler
@ 2026-05-07 22:25     ` Dave Marquardt
  0 siblings, 0 replies; 11+ messages in thread
From: Dave Marquardt @ 2026-05-07 22:25 UTC (permalink / raw)
  To: Tyrel Datwyler
  Cc: James E.J. Bottomley, Martin K. Petersen, Madhavan Srinivasan,
	Michael Ellerman, Nicholas Piggin, Christophe Leroy (CS GROUP),
	linux-kernel, linux-scsi, linuxppc-dev, Brian King, Greg Joyce,
	Kyle Mahlkuch

Tyrel Datwyler <tyreld@linux.ibm.com> writes:

> On 4/8/26 10:07 AM, Dave Marquardt via B4 Relay wrote:
>> From: Dave Marquardt <davemarq@linux.ibm.com>
>
>> - Add VFC_NOOP command support
>> - Add KUnit tests for VFC_NOOP command
>> ---
>>  drivers/scsi/ibmvscsi/ibmvfc.c       | 23 +++++++++++++----------
>>  drivers/scsi/ibmvscsi/ibmvfc.h       | 13 +++++++++++++
>>  drivers/scsi/ibmvscsi/ibmvfc_kunit.c | 27 +++++++++++++++++++++++++++
>>  3 files changed, 53 insertions(+), 10 deletions(-)
>> 
>> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
>> index 3ac376ba2c62..808301fa452d 100644
>> --- a/drivers/scsi/ibmvscsi/ibmvfc.c
>> +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
>> @@ -189,13 +189,6 @@ static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
>>  	return rc;
>>  }
>>  
>> -static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
>> -{
>> -	u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);
>> -
>> -	return (host_caps & cap_flags) ? 1 : 0;
>> -}
>> -
>
> It appears you are moving this to ibmvfc.h? Is there reasoning outside making it
> visible to kunit?

That's exactly why I'm moving this to ibmvfc.h. Alternatively I could
export this routine. I'm okay doing that if you prefer it.

>>  static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
>>  						   struct ibmvfc_cmd *vfc_cmd)
>>  {
>> @@ -1512,7 +1505,9 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
>>  		login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
>>  
>>  	login_info->max_cmds = cpu_to_be32(max_cmds);
>> -	login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
>> +	login_info->capabilities =
>> +		cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN |
>> +			    IBMVFC_CAN_USE_NOOP_CMD);
>>  
>>  	if (vhost->mq_enabled || vhost->using_channels)
>>  		login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);
>> @@ -3461,8 +3456,8 @@ EXPORT_SYMBOL_IF_KUNIT(ibmvfc_handle_async);
>>   * @evt_doneq:	Event done queue
>>   *
>>  **/
>> -static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
>> -			      struct list_head *evt_doneq)
>> +VISIBLE_IF_KUNIT void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
>> +					struct list_head *evt_doneq)
>>  {
>>  	long rc;
>>  	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
>> @@ -3520,6 +3515,13 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
>>  	if (crq->format == IBMVFC_ASYNC_EVENT)
>>  		return;
>>  
>> +	if (crq->format == IBMVFC_VFC_NOOP) {
>> +		if (!ibmvfc_check_caps(vhost, IBMVFC_SUPPORT_NOOP_CMD))
>> +			dev_err(vhost->dev,
>> +				"Received unexpected NOOP command from partner\n");
>
> If we have a misbehaved VIOS partner we may want to ratelimit this dev_err so
> that we don't flood the log. Probably a corner case, but I don't think it hurts.

Okay, I'll add rate limiting in v2.

-Dave

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 3/5] ibmvfc: make ibmvfc login to fabric
  2026-05-07  5:03   ` [PATCH 3/5] ibmvfc: make ibmvfc login to fabric Tyrel Datwyler
@ 2026-05-07 22:34     ` Dave Marquardt
  0 siblings, 0 replies; 11+ messages in thread
From: Dave Marquardt @ 2026-05-07 22:34 UTC (permalink / raw)
  To: Tyrel Datwyler
  Cc: James E.J. Bottomley, Martin K. Petersen, Madhavan Srinivasan,
	Michael Ellerman, Nicholas Piggin, Christophe Leroy (CS GROUP),
	linux-kernel, linux-scsi, linuxppc-dev, Brian King, Greg Joyce,
	Kyle Mahlkuch

Tyrel Datwyler <tyreld@linux.ibm.com> writes:

> On 4/8/26 10:07 AM, Dave Marquardt via B4 Relay wrote:
>> From: Dave Marquardt <davemarq@linux.ibm.com>
>> 
>> Make ibmvfc login to fabric when NPIV login returns SUPPORT_SCSI or
>> SUPPORT_NVMEOF capabilities.
>
> Again better commit log message here and developer sign off tag.
>
>> ---
>>  drivers/scsi/ibmvscsi/ibmvfc.c | 100 ++++++++++++++++++++++++++++++++++++++---
>>  drivers/scsi/ibmvscsi/ibmvfc.h |  20 +++++++++
>>  2 files changed, 115 insertions(+), 5 deletions(-)
>> 
>> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
>> index 808301fa452d..803fc3caa14d 100644
>> --- a/drivers/scsi/ibmvscsi/ibmvfc.c
>> +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
>> @@ -5205,6 +5205,89 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
>>  		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
>>  }
>>  
>> +static void ibmvfc_fabric_login_done(struct ibmvfc_event *evt)
>> +{
>> +	struct ibmvfc_fabric_login *rsp = &evt->xfer_iu->fabric_login;
>> +	u32 mad_status = be16_to_cpu(rsp->common.status);
>> +	struct ibmvfc_host *vhost = evt->vhost;
>> +	int level = IBMVFC_DEFAULT_LOG_LEVEL;
>> +
>> +	ENTER;
>> +
>> +	switch (mad_status) {
>> +	case IBMVFC_MAD_SUCCESS:
>> +		vhost->logged_in = 1;
>
> I'm not sure I see the point of setting logged_in here since we already set it
> in npiv_login_done.

Agreed.

>> +		vhost->fabric_capabilities = rsp->capabilities;
>
> The way this is currently spec'd out there are no linux relevant capabilities
> coming from fabric login. So, I'm not sure there is a reason to save them at
> this point.

Okay.

>> +		fc_host_port_id(vhost->host) = be64_to_cpu(rsp->nport_id);
>> +		ibmvfc_free_event(evt);
>> +		break;
>> +
>> +	case IBMVFC_MAD_FAILED:
>> +		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
>> +			level += ibmvfc_retry_host_init(vhost);
>> +		else
>> +			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
>> +		ibmvfc_log(vhost, level, "Fabric Login failed: %s (%x:%x)\n",
>> +			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
>> +						be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
>> +		ibmvfc_free_event(evt);
>> +		LEAVE;
>> +		return;
>> +
>> +	case IBMVFC_MAD_CRQ_ERROR:
>> +		ibmvfc_retry_host_init(vhost);
>> +		fallthrough;
>> +
>> +	case IBMVFC_MAD_DRIVER_FAILED:
>> +		ibmvfc_free_event(evt);
>> +		LEAVE;
>> +		return;
>> +
>> +	default:
>> +		dev_err(vhost->dev, "Invalid fabric Login response: 0x%x\n", mad_status);
>> +		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
>> +		ibmvfc_free_event(evt);
>> +		LEAVE;
>> +		return;
>> +	}
>> +
>> +	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
>> +	wake_up(&vhost->work_wait_q);
>> +
>> +	LEAVE;
>> +}
>> +
>> +static void ibmvfc_fabric_login(struct ibmvfc_host *vhost)
>> +{
>> +	struct ibmvfc_fabric_login *mad;
>> +	struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
>> +	int level = IBMVFC_DEFAULT_LOG_LEVEL;
>> +
>> +	if (!evt) {
>
> I think we need to hard reset here or we are dead in the water if there are no
> events.

I will add a hard reset here.

>> +		ibmvfc_log(vhost, level, "Fabric Login failed: no available events\n");
>> +		return;
>> +	}
>> +
>> +	ibmvfc_init_event(evt, ibmvfc_fabric_login_done, IBMVFC_MAD_FORMAT);
>> +	mad = &evt->iu.fabric_login;
>> +	memset(mad, 0, sizeof(*mad));
>> +	if (vhost->scsi_scrqs.protocol == IBMVFC_PROTO_SCSI)
>> +		mad->common.opcode = cpu_to_be32(IBMVFC_FABRIC_LOGIN);
>> +	else if (vhost->scsi_scrqs.protocol == IBMVFC_PROTO_NVME)
>> +		mad->common.opcode = cpu_to_be32(IBMVFC_NVMF_FABRIC_LOGIN);
>
> The VIOS won't return NVMF support unless we advertise it. So, I think it's best
> to omit any NVMF relevant changes that are spec'd as they aren't being applied
> in a proper workflow here anyways. If the driver advertised both SCSI and NVMF
> support the current code would never do a NVMF fabric login as it would never
> fall through here.

Okay, I'll remove the NVMF changes.

>> +	else {
>> +		ibmvfc_log(vhost, level, "Fabric Login failed: unknown protocol\n");
>> +		return;
>> +	}
>> +	mad->common.version = cpu_to_be32(1);
>> +	mad->common.length = cpu_to_be16(sizeof(*mad));
>> +
>> +	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
>> +
>> +	if (ibmvfc_send_event(evt, vhost, default_timeout))
>> +		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
>> +}
>> +
>>  static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
>>  {
>>  	struct ibmvfc_host *vhost = evt->vhost;
>> @@ -5251,8 +5334,12 @@ static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
>>  		return;
>>  	}
>>  
>> -	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
>> -	wake_up(&vhost->work_wait_q);
>> +	if (ibmvfc_check_caps(vhost, (IBMVFC_SUPPORT_SCSI | IBMVFC_SUPPORT_NVMEOF))) {
>> +		ibmvfc_fabric_login(vhost);
>
> Again drop the NVMEOF code.

Okay.

>> +	} else {
>> +		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
>> +		wake_up(&vhost->work_wait_q);
>> +	}
>>  }
>>  
>>  static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
>> @@ -5443,9 +5530,12 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
>>  	vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
>>  	vhost->host->max_sectors = npiv_max_sectors;
>>  
>> -	if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) {
>> -		ibmvfc_channel_enquiry(vhost);
>> -	} else {
>> +	if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS)) {
>> +		if (vhost->do_enquiry)
>> +			ibmvfc_channel_enquiry(vhost);
>
> I'm not sure I understand expanding this code to a second if block as there is
> no functional change.

Agreed.

>> +	} else if (ibmvfc_check_caps(vhost, (IBMVFC_SUPPORT_SCSI | IBMVFC_SUPPORT_NVMEOF)))
>
> Again drop NVMEOF and NVMF related changes.

Yes.

>> +		ibmvfc_fabric_login(vhost);
>> +	else {
>>  		vhost->do_enquiry = 0;
>>  		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
>>  		wake_up(&vhost->work_wait_q);
>> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
>> index cd0917f70c6d..4f680c5d9558 100644
>> --- a/drivers/scsi/ibmvscsi/ibmvfc.h
>> +++ b/drivers/scsi/ibmvscsi/ibmvfc.h
>> @@ -138,6 +138,8 @@ enum ibmvfc_mad_types {
>>  	IBMVFC_CHANNEL_ENQUIRY	= 0x1000,
>>  	IBMVFC_CHANNEL_SETUP	= 0x2000,
>>  	IBMVFC_CONNECTION_INFO	= 0x4000,
>> +	IBMVFC_FABRIC_LOGIN	= 0x8000,
>> +	IBMVFC_NVMF_FABRIC_LOGIN	= 0x8001,
>>  };
>>  
>>  struct ibmvfc_mad_common {
>> @@ -227,6 +229,8 @@ struct ibmvfc_npiv_login_resp {
>>  #define IBMVFC_MAD_VERSION_CAP		0x20
>>  #define IBMVFC_HANDLE_VF_WWPN		0x40
>>  #define IBMVFC_CAN_SUPPORT_CHANNELS	0x80
>> +#define IBMVFC_SUPPORT_NVMEOF		0x100
>> +#define IBMVFC_SUPPORT_SCSI		0x200
>>  #define IBMVFC_SUPPORT_NOOP_CMD		0x1000
>>  	__be32 max_cmds;
>>  	__be32 scsi_id_sz;
>> @@ -590,6 +594,19 @@ struct ibmvfc_connection_info {
>>  	__be64 reserved[16];
>>  } __packed __aligned(8);
>>  
>> +struct ibmvfc_fabric_login {
>> +	struct ibmvfc_mad_common common;
>> +	__be64 flags;
>> +#define IBMVFC_STRIP_MERGE	0x1
>> +#define IBMVFC_LINK_COMMANDS	0x2
>> +	__be64 capabilities;
>> +	__be64 nport_id;
>> +	__be16 status;
>> +	__be16 error;
>> +	__be32 pad;
>> +	__be64 reserved[16];
>> +} __packed __aligned(8);
>> +
>>  struct ibmvfc_trace_start_entry {
>>  	u32 xfer_len;
>>  } __packed;
>> @@ -709,6 +726,7 @@ union ibmvfc_iu {
>>  	struct ibmvfc_channel_enquiry channel_enquiry;
>>  	struct ibmvfc_channel_setup_mad channel_setup;
>>  	struct ibmvfc_connection_info connection_info;
>> +	struct ibmvfc_fabric_login fabric_login;
>>  } __packed __aligned(8);
>>  
>>  enum ibmvfc_target_action {
>> @@ -921,6 +939,8 @@ struct ibmvfc_host {
>>  	struct work_struct rport_add_work_q;
>>  	wait_queue_head_t init_wait_q;
>>  	wait_queue_head_t work_wait_q;
>> +	__be64 fabric_capabilities;
>> +	unsigned int login_cap_index;
>
> Let's drop these as they serve no purpose for Linux. If the spec changes to
> introduce capabilities relevant to Linux we can add it then.

Okay. You discussed fabric_capabilities. I think login_cap_index isn't
useful until patch 4 or 5, so I'll drop it from patch 4.

-Dave


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 4/5] ibmvfc: use async sub-queue for FPIN messages
  2026-05-07  5:41   ` [PATCH 4/5] ibmvfc: use async sub-queue for FPIN messages Tyrel Datwyler
@ 2026-05-07 22:40     ` Dave Marquardt
  0 siblings, 0 replies; 11+ messages in thread
From: Dave Marquardt @ 2026-05-07 22:40 UTC (permalink / raw)
  To: Tyrel Datwyler
  Cc: James E.J. Bottomley, Martin K. Petersen, Madhavan Srinivasan,
	Michael Ellerman, Nicholas Piggin, Christophe Leroy (CS GROUP),
	linux-kernel, linux-scsi, linuxppc-dev, Brian King, Greg Joyce,
	Kyle Mahlkuch

Tyrel Datwyler <tyreld@linux.ibm.com> writes:

> On 4/8/26 10:07 AM, Dave Marquardt via B4 Relay wrote:
>> From: Dave Marquardt <davemarq@linux.ibm.com>
>> 
>> - allocate async sub-queue
>> - allocate interrupt and set up handler
>> - negotiate use of async sub-queue with NPIV (VIOS)
>> - refactor ibmvfc_basic_fpin_to_desc() and ibmvfc_full_fpin_to_desc()
>>   into common routine
>> - add KUnit test to verify async sub-queue is allocated
>
> Again more descriptive commit log message required here. Also, this looks like a
> lot of things being implemented. Can this be broken into multiple patches? It
> sure looks like a bunch of functional changes that build on each other.

Yes, I'll break this up into more patches.

>> ---
>>  drivers/scsi/ibmvscsi/ibmvfc.c       | 325 ++++++++++++++++++++++++++++++++---
>>  drivers/scsi/ibmvscsi/ibmvfc.h       |  29 +++-
>>  drivers/scsi/ibmvscsi/ibmvfc_kunit.c |  52 +++---
>>  3 files changed, 363 insertions(+), 43 deletions(-)
>> 
>> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
>> index 803fc3caa14d..26e39b367022 100644
>> --- a/drivers/scsi/ibmvscsi/ibmvfc.c
>> +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
>> @@ -1471,6 +1471,13 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
>>  	of_node_put(rootdn);
>>  }
>>  
>> +static __be64 ibmvfc_npiv_chan_caps[] = {
>> +	cpu_to_be64(IBMVFC_CAN_USE_CHANNELS | IBMVFC_USE_ASYNC_SUBQ |
>> +		    IBMVFC_YES_SCSI | IBMVFC_CAN_HANDLE_FPIN),
>> +	cpu_to_be64(IBMVFC_CAN_USE_CHANNELS),
>> +};
>> +#define IBMVFC_NPIV_CHAN_CAPS_SIZE (sizeof(ibmvfc_npiv_chan_caps)/sizeof(__be64))
>> +
>
> I really don't understand what you are doing here? You seem to be defining
> various sets of capabilities, but how does the driver decide which set to use?
> As far as I can tell the index is increased and the capabilities decrease each
> time a transport event is received. This looks like maybe it's just a testing hack.

My thought was to deal with an older VIOS that doesn't support the async
sub-queue and full FPIN. But I suppose the response should just not set the
appropriate bits. I'll go re-read the NPIV spec and figure out if this
is actually needed.

>>  /**
>>   * ibmvfc_set_login_info - Setup info for NPIV login
>>   * @vhost:	ibmvfc host struct
>> @@ -1486,6 +1493,8 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
>>  	const char *location;
>>  	u16 max_cmds;
>>  
>> +	ENTER;
>> +
>>  	max_cmds = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
>>  	if (mq_enabled)
>>  		max_cmds += (scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ) *
>> @@ -1509,8 +1518,12 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
>>  		cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN |
>>  			    IBMVFC_CAN_USE_NOOP_CMD);
>>  
>> -	if (vhost->mq_enabled || vhost->using_channels)
>> -		login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);
>> +	if (vhost->mq_enabled || vhost->using_channels) {
>> +		if (vhost->login_cap_index >= IBMVFC_NPIV_CHAN_CAPS_SIZE)
>> +			login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);
>> +		else
>> +			login_info->capabilities |= ibmvfc_npiv_chan_caps[vhost->login_cap_index];
>> +	}
>>  
>>  	login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
>>  	login_info->async.len = cpu_to_be32(async_crq->size *
>> @@ -1524,6 +1537,8 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
>>  	location = of_get_property(of_node, "ibm,loc-code", NULL);
>>  	location = location ? location : dev_name(vhost->dev);
>>  	strscpy(login_info->drc_name, location, sizeof(login_info->drc_name));
>> +
>> +	LEAVE;
>>  }
>>  
>>  /**
>> @@ -3323,7 +3338,7 @@ ibmvfc_common_fpin_to_desc(u8 fpin_status, __be64 wwpn, __be16 modifier,
>>   * non-NULL - pointer to populated struct fc_els_fpin
>>   */
>>  static struct fc_els_fpin *
>> -/*XXX*/ibmvfc_basic_fpin_to_desc(struct ibmvfc_async_crq *crq)
>
> I mentioned this /*XXX*/ in an earlier patch. This needs to be fixed in that patch.

Yes.

>> +ibmvfc_basic_fpin_to_desc(struct ibmvfc_async_crq *crq)
>>  {
>>  	return ibmvfc_common_fpin_to_desc(crq->fpin_status, crq->wwpn,
>>  					  cpu_to_be16(0),
>> @@ -3332,6 +3347,29 @@ static struct fc_els_fpin *
>>  					  cpu_to_be32(1));
>>  }
>>  
>> +/**
>> + * ibmvfc_full_fpin_to_desc(): allocate and populate a struct fc_els_fpin struct
>> + * containing a descriptor.
>> + * @ibmvfc_fpin: Pointer to async subq FPIN data
>> + *
>> + * Allocate a struct fc_els_fpin containing a descriptor and populate
>> + * based on data from *ibmvfc_fpin.
>> + *
>> + * Return:
>> + * NULL     - unable to allocate structure
>> + * non-NULL - pointer to populated struct fc_els_fpin
>> + */
>> +static struct fc_els_fpin *
>> +ibmvfc_full_fpin_to_desc(struct ibmvfc_async_subq *ibmvfc_fpin)
>> +{
>> +	return ibmvfc_common_fpin_to_desc(ibmvfc_fpin->fpin_status,
>> +					  ibmvfc_fpin->wwpn,
>> +					  cpu_to_be16(0),
>> +					  cpu_to_be32(IBMVFC_FPIN_DEFAULT_EVENT_PERIOD),
>> +					  cpu_to_be32(IBMVFC_FPIN_DEFAULT_EVENT_THRESHOLD),
>> +					  cpu_to_be32(1));
>> +}
>> +
>>  /**
>>   * ibmvfc_handle_async - Handle an async event from the adapter
>>   * @crq:	crq to process
>> @@ -3449,6 +3487,120 @@ VISIBLE_IF_KUNIT void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
>>  }
>>  EXPORT_SYMBOL_IF_KUNIT(ibmvfc_handle_async);
>>  
>> +VISIBLE_IF_KUNIT void ibmvfc_handle_asyncq(struct ibmvfc_crq *crq_instance,
>> +					   struct ibmvfc_host *vhost,
>> +					   struct list_head *evt_doneq)
>> +{
>> +	struct ibmvfc_async_subq *crq = (struct ibmvfc_async_subq *)crq_instance;
>> +	const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be16_to_cpu(crq->event));
>> +	struct ibmvfc_target *tgt;
>> +	struct fc_els_fpin *fpin;
>> +
>> +	ibmvfc_log(vhost, desc->log_level,
>> +		   "%s event received. wwpn: %llx, node_name: %llx%s event 0x%x\n",
>> +		   desc->desc, be64_to_cpu(crq->wwpn), be64_to_cpu(crq->id.node_name),
>> +		   ibmvfc_get_link_state(crq->link_state), be16_to_cpu(crq->event));
>
> Is there a way to avoid copy/pasting what looks like basically ibmvfc_handle_async
> into ibmvfc_handle_asyncq? This is a bunch of unnecessary code bloat. The major
> difference seems that crq->event is be64 on the standard CRQ and be16 on a
> sub-crq and accessing certain fields differently.

That's a good idea. I'll see what I can do. Seems like a little
refactoring should make it work.

> Again I think maybe we need to consider moving all the async work into a workqueue.

My initial thought was to just queue the FPIN processing of
ibmvfc_handle_asyncq to a work queue to resolve the problem of calling
fc_host_fpin_rcv from interrupt context, but putting all of this
processing into a work queue would work too. I'll look into it.

-Dave

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 5/5] ibmvfc: handle extended FPIN events
  2026-05-07  5:48   ` [PATCH 5/5] ibmvfc: handle extended FPIN events Tyrel Datwyler
@ 2026-05-08 14:38     ` Dave Marquardt
  0 siblings, 0 replies; 11+ messages in thread
From: Dave Marquardt @ 2026-05-08 14:38 UTC (permalink / raw)
  To: Tyrel Datwyler
  Cc: James E.J. Bottomley, Martin K. Petersen, Madhavan Srinivasan,
	Michael Ellerman, Nicholas Piggin, Christophe Leroy (CS GROUP),
	linux-kernel, linux-scsi, linuxppc-dev, Brian King, Greg Joyce,
	Kyle Mahlkuch

Tyrel Datwyler <tyreld@linux.ibm.com> writes:

> On 4/8/26 10:07 AM, Dave Marquardt via B4 Relay wrote:
>> From: Dave Marquardt <davemarq@linux.ibm.com>
>> 
>> - negotiate use of extended FPIN events with NPIV (VIOS)
>> - add code to parse and handle extended FPIN events
>> - add KUnit test to test extended FPIN event handling
>
> Same nit here as the previous 4 patches.
>
>> ---
>>  drivers/scsi/ibmvscsi/ibmvfc.c       | 45 ++++++++++++++---
>>  drivers/scsi/ibmvscsi/ibmvfc.h       | 31 ++++++++++++
>>  drivers/scsi/ibmvscsi/ibmvfc_kunit.c | 97 +++++++++++++++++++++++++++++++++---
>>  3 files changed, 161 insertions(+), 12 deletions(-)
>> 
>> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
>> index 26e39b367022..5b2b861a34c2 100644
>> --- a/drivers/scsi/ibmvscsi/ibmvfc.c
>> +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
>> @@ -1472,6 +1472,9 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
>>  }
>>  
>>  static __be64 ibmvfc_npiv_chan_caps[] = {
>> +	cpu_to_be64(IBMVFC_CAN_USE_CHANNELS | IBMVFC_USE_ASYNC_SUBQ |
>> +		    IBMVFC_YES_SCSI | IBMVFC_CAN_HANDLE_FPIN |
>> +		    IBMVFC_CAN_HANDLE_FPIN_EXT),
>>  	cpu_to_be64(IBMVFC_CAN_USE_CHANNELS | IBMVFC_USE_ASYNC_SUBQ |
>>  		    IBMVFC_YES_SCSI | IBMVFC_CAN_HANDLE_FPIN),
>>  	cpu_to_be64(IBMVFC_CAN_USE_CHANNELS),
>> @@ -3370,6 +3373,28 @@ ibmvfc_full_fpin_to_desc(struct ibmvfc_async_subq *ibmvfc_fpin)
>>  					  cpu_to_be32(1));
>>  }
>>  
>> +/**
>> + * ibmvfc_ext_fpin_to_desc(): allocate and populate a struct fc_els_fpin struct
>> + * containing a descriptor.
>> + * @ibmvfc_fpin: Pointer to async subq FPIN data
>> + *
>> + * Allocate a struct fc_els_fpin containing a descriptor and populate
>> + * based on data from *ibmvfc_fpin.
>> + *
>> + * Return:
>> + * NULL     - unable to allocate structure
>> + * non-NULL - pointer to populated struct fc_els_fpin
>> + */
>> +static struct fc_els_fpin *
>> +ibmvfc_ext_fpin_to_desc(struct ibmvfc_async_subq_fpin *ibmvfc_fpin)
>> +{
>> +	return ibmvfc_common_fpin_to_desc(ibmvfc_fpin->fpin_status, ibmvfc_fpin->wwpn,
>> +					  ibmvfc_fpin->fpin_data.event_type_modifier,
>> +					  ibmvfc_fpin->fpin_data.event_threshold,
>> +					  ibmvfc_fpin->fpin_data.event_threshold,
>
> I see mention of threshold and period previously. Why in this case is it just
> the threshold value passed for both?

I'll look into this. There's no obvious period here in ibmvfc_fpin or
ibmvfc_fpin->fpin_data. It may be more appropriate to use a default
period.

-Dave

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2026-05-08 14:38 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
     [not found] <20260408-ibmvfc-fpin-support-v1-0-52b06c464e03@linux.ibm.com>
     [not found] ` <20260408-ibmvfc-fpin-support-v1-1-52b06c464e03@linux.ibm.com>
2026-05-07  4:12   ` [PATCH 1/5] ibmvfc: add basic FPIN support Tyrel Datwyler
2026-05-07 22:22     ` Dave Marquardt
     [not found] ` <20260408-ibmvfc-fpin-support-v1-2-52b06c464e03@linux.ibm.com>
2026-05-07  4:17   ` [PATCH 2/5] ibmvfc: Add NOOP command support Tyrel Datwyler
2026-05-07 22:25     ` Dave Marquardt
     [not found] ` <20260408-ibmvfc-fpin-support-v1-3-52b06c464e03@linux.ibm.com>
2026-05-07  5:03   ` [PATCH 3/5] ibmvfc: make ibmvfc login to fabric Tyrel Datwyler
2026-05-07 22:34     ` Dave Marquardt
     [not found] ` <20260408-ibmvfc-fpin-support-v1-4-52b06c464e03@linux.ibm.com>
2026-05-07  5:41   ` [PATCH 4/5] ibmvfc: use async sub-queue for FPIN messages Tyrel Datwyler
2026-05-07 22:40     ` Dave Marquardt
     [not found] ` <20260408-ibmvfc-fpin-support-v1-5-52b06c464e03@linux.ibm.com>
2026-05-07  5:48   ` [PATCH 5/5] ibmvfc: handle extended FPIN events Tyrel Datwyler
2026-05-08 14:38     ` Dave Marquardt
     [not found] ` <yq1a4uke7rz.fsf@ca-mkp.ca.oracle.com>
2026-05-07 22:15   ` [PATCH 0/5] ibmvfc: make ibmvfc support FPIN messages Dave Marquardt

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox