* [PATCH v3 01/18] nvmet-fcloop: remove nport from list on last user
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
@ 2025-03-18 10:39 ` Daniel Wagner
2025-03-18 10:39 ` [PATCH v3 02/18] nvmet-fcloop: replace kref with refcount Daniel Wagner
` (16 subsequent siblings)
17 siblings, 0 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:39 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
The nport object has an association with the rport and lport objects,
which means we can only remove an nport object from the global nport_list
after the last user of an rport or lport is gone.
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/target/fcloop.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 3390215b1c1f040001985a0d33741e57f1f80cb3..09546c3161fd9828234e58641de3e53519e27824 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1004,6 +1004,11 @@ fcloop_nport_free(struct kref *ref)
{
struct fcloop_nport *nport =
container_of(ref, struct fcloop_nport, ref);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ list_del(&nport->nport_list);
+ spin_unlock_irqrestore(&fcloop_lock, flags);
kfree(nport);
}
@@ -1362,8 +1367,6 @@ __unlink_remote_port(struct fcloop_nport *nport)
nport->tport->remoteport = NULL;
nport->rport = NULL;
- list_del(&nport->nport_list);
-
return rport;
}
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* [PATCH v3 02/18] nvmet-fcloop: replace kref with refcount
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
2025-03-18 10:39 ` [PATCH v3 01/18] nvmet-fcloop: remove nport from list on last user Daniel Wagner
@ 2025-03-18 10:39 ` Daniel Wagner
2025-03-18 10:56 ` Hannes Reinecke
` (2 more replies)
2025-03-18 10:39 ` [PATCH v3 03/18] nvmet-fcloop: add ref counting to lport Daniel Wagner
` (15 subsequent siblings)
17 siblings, 3 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:39 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
The kref wrapper is not really adding any value on top of refcount. Thus
replace the kref API with the refcount API.
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/target/fcloop.c | 37 +++++++++++++------------------------
1 file changed, 13 insertions(+), 24 deletions(-)
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 09546c3161fd9828234e58641de3e53519e27824..cfce70d1b11ff305b203d716f78fad23f114e9c3 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -239,7 +239,7 @@ struct fcloop_nport {
struct fcloop_tport *tport;
struct fcloop_lport *lport;
struct list_head nport_list;
- struct kref ref;
+ refcount_t ref;
u64 node_name;
u64 port_name;
u32 port_role;
@@ -273,7 +273,7 @@ struct fcloop_fcpreq {
u32 inistate;
bool active;
bool aborted;
- struct kref ref;
+ refcount_t ref;
struct work_struct fcp_rcv_work;
struct work_struct abort_rcv_work;
struct work_struct tio_done_work;
@@ -533,24 +533,18 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
}
static void
-fcloop_tfcp_req_free(struct kref *ref)
+fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
- struct fcloop_fcpreq *tfcp_req =
- container_of(ref, struct fcloop_fcpreq, ref);
+ if (!refcount_dec_and_test(&tfcp_req->ref))
+ return;
kfree(tfcp_req);
}
-static void
-fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
-{
- kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
-}
-
static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
- return kref_get_unless_zero(&tfcp_req->ref);
+ return refcount_inc_not_zero(&tfcp_req->ref);
}
static void
@@ -747,7 +741,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
- kref_init(&tfcp_req->ref);
+ refcount_set(&tfcp_req->ref, 1);
queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
@@ -1000,12 +994,13 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
}
static void
-fcloop_nport_free(struct kref *ref)
+fcloop_nport_put(struct fcloop_nport *nport)
{
- struct fcloop_nport *nport =
- container_of(ref, struct fcloop_nport, ref);
unsigned long flags;
+ if (!refcount_dec_and_test(&nport->ref))
+ return;
+
spin_lock_irqsave(&fcloop_lock, flags);
list_del(&nport->nport_list);
spin_unlock_irqrestore(&fcloop_lock, flags);
@@ -1013,16 +1008,10 @@ fcloop_nport_free(struct kref *ref)
kfree(nport);
}
-static void
-fcloop_nport_put(struct fcloop_nport *nport)
-{
- kref_put(&nport->ref, fcloop_nport_free);
-}
-
static int
fcloop_nport_get(struct fcloop_nport *nport)
{
- return kref_get_unless_zero(&nport->ref);
+ return refcount_inc_not_zero(&nport->ref);
}
static void
@@ -1253,7 +1242,7 @@ fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
newnport->port_role = opts->roles;
if (opts->mask & NVMF_OPT_FCADDR)
newnport->port_id = opts->fcaddr;
- kref_init(&newnport->ref);
+ refcount_set(&newnport->ref, 1);
spin_lock_irqsave(&fcloop_lock, flags);
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* Re: [PATCH v3 02/18] nvmet-fcloop: replace kref with refcount
2025-03-18 10:39 ` [PATCH v3 02/18] nvmet-fcloop: replace kref with refcount Daniel Wagner
@ 2025-03-18 10:56 ` Hannes Reinecke
2025-04-02 14:03 ` Daniel Wagner
2025-03-21 6:03 ` Christoph Hellwig
2025-03-21 14:16 ` Hannes Reinecke
2 siblings, 1 reply; 62+ messages in thread
From: Hannes Reinecke @ 2025-03-18 10:56 UTC (permalink / raw)
To: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni
Cc: Keith Busch, linux-nvme, linux-kernel
On 3/18/25 11:39, Daniel Wagner wrote:
> The kref wrapper is not really adding any value ontop of refcount. Thus
> replace the kref API with the refcount API.
>
> Signed-off-by: Daniel Wagner <wagi@kernel.org>
> ---
> drivers/nvme/target/fcloop.c | 37 +++++++++++++------------------------
> 1 file changed, 13 insertions(+), 24 deletions(-)
>
Can you split this in two, one for the nport and one for fcpreq?
That way it's easier to follow what has been modified.
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [PATCH v3 02/18] nvmet-fcloop: replace kref with refcount
2025-03-18 10:56 ` Hannes Reinecke
@ 2025-04-02 14:03 ` Daniel Wagner
2025-04-02 14:06 ` Hannes Reinecke
0 siblings, 1 reply; 62+ messages in thread
From: Daniel Wagner @ 2025-04-02 14:03 UTC (permalink / raw)
To: Hannes Reinecke
Cc: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 11:56:50AM +0100, Hannes Reinecke wrote:
> On 3/18/25 11:39, Daniel Wagner wrote:
> > The kref wrapper is not really adding any value ontop of refcount. Thus
> > replace the kref API with the refcount API.
> >
> > Signed-off-by: Daniel Wagner <wagi@kernel.org>
> > ---
> > drivers/nvme/target/fcloop.c | 37 +++++++++++++------------------------
> > 1 file changed, 13 insertions(+), 24 deletions(-)
> >
> Can you split this in two, one for the nport and one for fcpreq?
> That way it's easier to follow what has been modified.
Do you still want me to split the patch? You and Christoph have sent the
Reviewed-by tag after this review.
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [PATCH v3 02/18] nvmet-fcloop: replace kref with refcount
2025-04-02 14:03 ` Daniel Wagner
@ 2025-04-02 14:06 ` Hannes Reinecke
0 siblings, 0 replies; 62+ messages in thread
From: Hannes Reinecke @ 2025-04-02 14:06 UTC (permalink / raw)
To: Daniel Wagner
Cc: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni, Keith Busch, linux-nvme, linux-kernel
On 4/2/25 16:03, Daniel Wagner wrote:
> On Tue, Mar 18, 2025 at 11:56:50AM +0100, Hannes Reinecke wrote:
>> On 3/18/25 11:39, Daniel Wagner wrote:
>>> The kref wrapper is not really adding any value ontop of refcount. Thus
>>> replace the kref API with the refcount API.
>>>
>>> Signed-off-by: Daniel Wagner <wagi@kernel.org>
>>> ---
>>> drivers/nvme/target/fcloop.c | 37 +++++++++++++------------------------
>>> 1 file changed, 13 insertions(+), 24 deletions(-)
>>>
>> Can you split this in two, one for the nport and one for fcpreq?
>> That way it's easier to follow what has been modified.
>
> Do you still want me to split the patch? You and Christoph have sent the
> Reviewed-by tag after this review.
Ah, no, Don't bother.
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [PATCH v3 02/18] nvmet-fcloop: replace kref with refcount
2025-03-18 10:39 ` [PATCH v3 02/18] nvmet-fcloop: replace kref with refcount Daniel Wagner
2025-03-18 10:56 ` Hannes Reinecke
@ 2025-03-21 6:03 ` Christoph Hellwig
2025-03-21 14:16 ` Hannes Reinecke
2 siblings, 0 replies; 62+ messages in thread
From: Christoph Hellwig @ 2025-03-21 6:03 UTC (permalink / raw)
To: Daniel Wagner
Cc: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel
Looks good:
Reviewed-by: Christoph Hellwig <hch@lst.de>
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [PATCH v3 02/18] nvmet-fcloop: replace kref with refcount
2025-03-18 10:39 ` [PATCH v3 02/18] nvmet-fcloop: replace kref with refcount Daniel Wagner
2025-03-18 10:56 ` Hannes Reinecke
2025-03-21 6:03 ` Christoph Hellwig
@ 2025-03-21 14:16 ` Hannes Reinecke
2 siblings, 0 replies; 62+ messages in thread
From: Hannes Reinecke @ 2025-03-21 14:16 UTC (permalink / raw)
To: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni
Cc: Keith Busch, linux-nvme, linux-kernel
On 3/18/25 11:39, Daniel Wagner wrote:
> The kref wrapper is not really adding any value ontop of refcount. Thus
> replace the kref API with the refcount API.
>
> Signed-off-by: Daniel Wagner <wagi@kernel.org>
> ---
> drivers/nvme/target/fcloop.c | 37 +++++++++++++------------------------
> 1 file changed, 13 insertions(+), 24 deletions(-)
>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
^ permalink raw reply [flat|nested] 62+ messages in thread
* [PATCH v3 03/18] nvmet-fcloop: add ref counting to lport
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
2025-03-18 10:39 ` [PATCH v3 01/18] nvmet-fcloop: remove nport from list on last user Daniel Wagner
2025-03-18 10:39 ` [PATCH v3 02/18] nvmet-fcloop: replace kref with refcount Daniel Wagner
@ 2025-03-18 10:39 ` Daniel Wagner
2025-03-21 6:04 ` Christoph Hellwig
2025-03-18 10:39 ` [PATCH v3 04/18] nvmet-fcloop: refactor fcloop_nport_alloc Daniel Wagner
` (14 subsequent siblings)
17 siblings, 1 reply; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:39 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
The fcloop_lport object's lifetime is controlled by the user interface
add_local_port and del_local_port. nport, rport and tport objects are
pointing to the lport objects but there is no clear tracking. Let's
introduce an explicit ref counter for the lport objects and prepare the
stage for restructuring how lports are used.
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/target/fcloop.c | 44 +++++++++++++++++++++++++++++++-------------
1 file changed, 31 insertions(+), 13 deletions(-)
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index cfce70d1b11ff305b203d716f78fad23f114e9c3..c6d5a31ce0b5ccb10988e339ae22d528e06d5e2b 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -208,6 +208,7 @@ struct fcloop_lport {
struct nvme_fc_local_port *localport;
struct list_head lport_list;
struct completion unreg_done;
+ refcount_t ref;
};
struct fcloop_lport_priv {
@@ -993,6 +994,27 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
}
}
+static void
+fcloop_lport_put(struct fcloop_lport *lport)
+{
+ unsigned long flags;
+
+ if (!refcount_dec_and_test(&lport->ref))
+ return;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ list_del(&lport->lport_list);
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ kfree(lport);
+}
+
+static int
+fcloop_lport_get(struct fcloop_lport *lport)
+{
+ return refcount_inc_not_zero(&lport->ref);
+}
+
static void
fcloop_nport_put(struct fcloop_nport *nport)
{
@@ -1022,6 +1044,8 @@ fcloop_localport_delete(struct nvme_fc_local_port *localport)
/* release any threads waiting for the unreg to complete */
complete(&lport->unreg_done);
+
+ fcloop_lport_put(lport);
}
static void
@@ -1133,6 +1157,7 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
lport->localport = localport;
INIT_LIST_HEAD(&lport->lport_list);
+ refcount_set(&lport->ref, 1);
spin_lock_irqsave(&fcloop_lock, flags);
list_add_tail(&lport->lport_list, &fcloop_lports);
@@ -1149,13 +1174,6 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
return ret ? ret : count;
}
-
-static void
-__unlink_local_port(struct fcloop_lport *lport)
-{
- list_del(&lport->lport_list);
-}
-
static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
@@ -1168,8 +1186,6 @@ __wait_localport_unreg(struct fcloop_lport *lport)
if (!ret)
wait_for_completion(&lport->unreg_done);
- kfree(lport);
-
return ret;
}
@@ -1192,8 +1208,9 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
list_for_each_entry(tlport, &fcloop_lports, lport_list) {
if (tlport->localport->node_name == nodename &&
tlport->localport->port_name == portname) {
+ if (!fcloop_lport_get(tlport))
+ break;
lport = tlport;
- __unlink_local_port(lport);
break;
}
}
@@ -1203,6 +1220,7 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
return -ENOENT;
ret = __wait_localport_unreg(lport);
+ fcloop_lport_put(lport);
return ret ? ret : count;
}
@@ -1628,17 +1646,17 @@ static void __exit fcloop_exit(void)
for (;;) {
lport = list_first_entry_or_null(&fcloop_lports,
typeof(*lport), lport_list);
- if (!lport)
+ if (!lport || !fcloop_lport_get(lport))
break;
- __unlink_local_port(lport);
-
spin_unlock_irqrestore(&fcloop_lock, flags);
ret = __wait_localport_unreg(lport);
if (ret)
pr_warn("%s: Failed deleting local port\n", __func__);
+ fcloop_lport_put(lport);
+
spin_lock_irqsave(&fcloop_lock, flags);
}
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* [PATCH v3 04/18] nvmet-fcloop: refactor fcloop_nport_alloc
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
` (2 preceding siblings ...)
2025-03-18 10:39 ` [PATCH v3 03/18] nvmet-fcloop: add ref counting to lport Daniel Wagner
@ 2025-03-18 10:39 ` Daniel Wagner
2025-03-18 11:02 ` Hannes Reinecke
2025-03-18 10:39 ` [PATCH v3 05/18] nvmet-fcloop: track ref counts for nports Daniel Wagner
` (13 subsequent siblings)
17 siblings, 1 reply; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:39 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
There are many different operations done under the spin lock. Since the
lport and nport are ref counted it's possible to use the spin lock only
for the list insert and lookup.
This allows us to untangle the setup steps into a more linear form which
reduces the complexity of the functions.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/target/fcloop.c | 156 +++++++++++++++++++++++--------------------
1 file changed, 84 insertions(+), 72 deletions(-)
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index c6d5a31ce0b5ccb10988e339ae22d528e06d5e2b..245bfe08d91ec81f1979251e8c757a0d46fd09e9 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1027,6 +1027,8 @@ fcloop_nport_put(struct fcloop_nport *nport)
list_del(&nport->nport_list);
spin_unlock_irqrestore(&fcloop_lock, flags);
+ if (nport->lport)
+ fcloop_lport_put(nport->lport);
kfree(nport);
}
@@ -1189,33 +1191,63 @@ __wait_localport_unreg(struct fcloop_lport *lport)
return ret;
}
+static struct fcloop_lport *
+fcloop_lport_lookup(u64 node_name, u64 port_name)
+{
+ struct fcloop_lport *lp, *lport = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ list_for_each_entry(lp, &fcloop_lports, lport_list) {
+ if (lp->localport->node_name != node_name ||
+ lp->localport->port_name != port_name)
+ continue;
+
+ if (fcloop_lport_get(lp))
+ lport = lp;
+
+ break;
+ }
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ return lport;
+}
+
+static struct fcloop_nport *
+fcloop_nport_lookup(u64 node_name, u64 port_name)
+{
+ struct fcloop_nport *np, *nport = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ list_for_each_entry(np, &fcloop_nports, nport_list) {
+ if (np->node_name != node_name ||
+ np->port_name != port_name)
+ continue;
+
+ if (fcloop_nport_get(np))
+ nport = np;
+
+ break;
+ }
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ return nport;
+}
static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct fcloop_lport *tlport, *lport = NULL;
+ struct fcloop_lport *lport;
u64 nodename, portname;
- unsigned long flags;
int ret;
ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
if (ret)
return ret;
- spin_lock_irqsave(&fcloop_lock, flags);
-
- list_for_each_entry(tlport, &fcloop_lports, lport_list) {
- if (tlport->localport->node_name == nodename &&
- tlport->localport->port_name == portname) {
- if (!fcloop_lport_get(tlport))
- break;
- lport = tlport;
- break;
- }
- }
- spin_unlock_irqrestore(&fcloop_lock, flags);
-
+ lport = fcloop_lport_lookup(nodename, portname);
if (!lport)
return -ENOENT;
@@ -1228,9 +1260,9 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
- struct fcloop_nport *newnport, *nport = NULL;
- struct fcloop_lport *tmplport, *lport = NULL;
struct fcloop_ctrl_options *opts;
+ struct fcloop_nport *nport;
+ struct fcloop_lport *lport;
unsigned long flags;
u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
int ret;
@@ -1244,79 +1276,59 @@ fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
goto out_free_opts;
/* everything there ? */
- if ((opts->mask & opts_mask) != opts_mask) {
- ret = -EINVAL;
+ if ((opts->mask & opts_mask) != opts_mask)
goto out_free_opts;
- }
- newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
- if (!newnport)
+ lport = fcloop_lport_lookup(opts->wwnn, opts->wwpn);
+ if (lport) {
+ /* invalid configuration */
+ fcloop_lport_put(lport);
goto out_free_opts;
+ }
- INIT_LIST_HEAD(&newnport->nport_list);
- newnport->node_name = opts->wwnn;
- newnport->port_name = opts->wwpn;
- if (opts->mask & NVMF_OPT_ROLES)
- newnport->port_role = opts->roles;
- if (opts->mask & NVMF_OPT_FCADDR)
- newnport->port_id = opts->fcaddr;
- refcount_set(&newnport->ref, 1);
+ nport = fcloop_nport_lookup(opts->wwnn, opts->wwpn);
+ if (nport && ((remoteport && nport->rport) ||
+ (!remoteport && nport->tport))) {
+ /* invalid configuration */
+ goto out_put_nport;
+ }
- spin_lock_irqsave(&fcloop_lock, flags);
+ if (!nport) {
+ nport = kzalloc(sizeof(*nport), GFP_KERNEL);
+ if (!nport)
+ goto out_free_opts;
- list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
- if (tmplport->localport->node_name == opts->wwnn &&
- tmplport->localport->port_name == opts->wwpn)
- goto out_invalid_opts;
+ INIT_LIST_HEAD(&nport->nport_list);
+ nport->node_name = opts->wwnn;
+ nport->port_name = opts->wwpn;
+ refcount_set(&nport->ref, 1);
- if (tmplport->localport->node_name == opts->lpwwnn &&
- tmplport->localport->port_name == opts->lpwwpn)
- lport = tmplport;
+ spin_lock_irqsave(&fcloop_lock, flags);
+ list_add_tail(&nport->nport_list, &fcloop_nports);
+ spin_unlock_irqrestore(&fcloop_lock, flags);
}
+ if (opts->mask & NVMF_OPT_ROLES)
+ nport->port_role = opts->roles;
+ if (opts->mask & NVMF_OPT_FCADDR)
+ nport->port_id = opts->fcaddr;
+
if (remoteport) {
+ lport = fcloop_lport_lookup(opts->lpwwnn, opts->lpwwpn);
if (!lport)
- goto out_invalid_opts;
- newnport->lport = lport;
- }
-
- list_for_each_entry(nport, &fcloop_nports, nport_list) {
- if (nport->node_name == opts->wwnn &&
- nport->port_name == opts->wwpn) {
- if ((remoteport && nport->rport) ||
- (!remoteport && nport->tport)) {
- nport = NULL;
- goto out_invalid_opts;
- }
-
- fcloop_nport_get(nport);
+ goto out_put_nport;
- spin_unlock_irqrestore(&fcloop_lock, flags);
-
- if (remoteport)
- nport->lport = lport;
- if (opts->mask & NVMF_OPT_ROLES)
- nport->port_role = opts->roles;
- if (opts->mask & NVMF_OPT_FCADDR)
- nport->port_id = opts->fcaddr;
- goto out_free_newnport;
- }
+ nport->lport = lport;
}
- list_add_tail(&newnport->nport_list, &fcloop_nports);
-
- spin_unlock_irqrestore(&fcloop_lock, flags);
-
kfree(opts);
- return newnport;
+ return nport;
-out_invalid_opts:
- spin_unlock_irqrestore(&fcloop_lock, flags);
-out_free_newnport:
- kfree(newnport);
+out_put_nport:
+ fcloop_nport_put(nport);
out_free_opts:
kfree(opts);
- return nport;
+ return NULL;
}
static ssize_t
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* Re: [PATCH v3 04/18] nvmet-fcloop: refactor fcloop_nport_alloc
2025-03-18 10:39 ` [PATCH v3 04/18] nvmet-fcloop: refactor fcloop_nport_alloc Daniel Wagner
@ 2025-03-18 11:02 ` Hannes Reinecke
2025-03-18 13:38 ` Daniel Wagner
0 siblings, 1 reply; 62+ messages in thread
From: Hannes Reinecke @ 2025-03-18 11:02 UTC (permalink / raw)
To: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni
Cc: Keith Busch, linux-nvme, linux-kernel
On 3/18/25 11:39, Daniel Wagner wrote:
> There are many different operations done under the spin lock. Since the
> lport and nport are ref counted it's possible to use the spin lock only
> for the list insert and lookup.
>
> This allows us to untangle the setup steps into a more linear form which
> reduces the complexity of the functions.
>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> Signed-off-by: Daniel Wagner <wagi@kernel.org>
> ---
> drivers/nvme/target/fcloop.c | 156 +++++++++++++++++++++++--------------------
> 1 file changed, 84 insertions(+), 72 deletions(-)
>
> diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
> index c6d5a31ce0b5ccb10988e339ae22d528e06d5e2b..245bfe08d91ec81f1979251e8c757a0d46fd09e9 100644
> --- a/drivers/nvme/target/fcloop.c
> +++ b/drivers/nvme/target/fcloop.c
> @@ -1027,6 +1027,8 @@ fcloop_nport_put(struct fcloop_nport *nport)
> list_del(&nport->nport_list);
> spin_unlock_irqrestore(&fcloop_lock, flags);
>
> + if (nport->lport)
> + fcloop_lport_put(nport->lport);
> kfree(nport);
> }
>
> @@ -1189,33 +1191,63 @@ __wait_localport_unreg(struct fcloop_lport *lport)
> return ret;
> }
>
> +static struct fcloop_lport *
> +fcloop_lport_lookup(u64 node_name, u64 port_name)
> +{
> + struct fcloop_lport *lp, *lport = NULL;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&fcloop_lock, flags);
> + list_for_each_entry(lp, &fcloop_lports, lport_list) {
> + if (lp->localport->node_name != node_name ||
> + lp->localport->port_name != port_name)
> + continue;
> +
> + if (fcloop_lport_get(lp))
> + lport = lp;
> +
> + break;
> + }
> + spin_unlock_irqrestore(&fcloop_lock, flags);
> +
> + return lport;
> +}
> +
> +static struct fcloop_nport *
> +fcloop_nport_lookup(u64 node_name, u64 port_name)
> +{
> + struct fcloop_nport *np, *nport = NULL;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&fcloop_lock, flags);
> + list_for_each_entry(np, &fcloop_nports, nport_list) {
> + if (np->node_name != node_name ||
> + np->port_name != port_name)
> + continue;
> +
> + if (fcloop_nport_get(np))
> + nport = np;
> +
> + break;
> + }
> + spin_unlock_irqrestore(&fcloop_lock, flags);
> +
> + return nport;
> +}
>
> static ssize_t
> fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
> const char *buf, size_t count)
> {
> - struct fcloop_lport *tlport, *lport = NULL;
> + struct fcloop_lport *lport;
> u64 nodename, portname;
> - unsigned long flags;
> int ret;
>
> ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
> if (ret)
> return ret;
>
> - spin_lock_irqsave(&fcloop_lock, flags);
> -
> - list_for_each_entry(tlport, &fcloop_lports, lport_list) {
> - if (tlport->localport->node_name == nodename &&
> - tlport->localport->port_name == portname) {
> - if (!fcloop_lport_get(tlport))
> - break;
> - lport = tlport;
> - break;
> - }
> - }
> - spin_unlock_irqrestore(&fcloop_lock, flags);
> -
> + lport = fcloop_lport_lookup(nodename, portname);
> if (!lport)
> return -ENOENT;
>
> @@ -1228,9 +1260,9 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
> static struct fcloop_nport *
> fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
> {
> - struct fcloop_nport *newnport, *nport = NULL;
> - struct fcloop_lport *tmplport, *lport = NULL;
> struct fcloop_ctrl_options *opts;
> + struct fcloop_nport *nport;
> + struct fcloop_lport *lport;
> unsigned long flags;
> u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
> int ret;
> @@ -1244,79 +1276,59 @@ fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
> goto out_free_opts;
>
> /* everything there ? */
> - if ((opts->mask & opts_mask) != opts_mask) {
> - ret = -EINVAL;
> + if ((opts->mask & opts_mask) != opts_mask)
> goto out_free_opts;
> - }
>
> - newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
> - if (!newnport)
> + lport = fcloop_lport_lookup(opts->wwnn, opts->wwpn);
> + if (lport) {
> + /* invalid configuration */
> + fcloop_lport_put(lport);
> goto out_free_opts;
> + }
>
> - INIT_LIST_HEAD(&newnport->nport_list);
> - newnport->node_name = opts->wwnn;
> - newnport->port_name = opts->wwpn;
> - if (opts->mask & NVMF_OPT_ROLES)
> - newnport->port_role = opts->roles;
> - if (opts->mask & NVMF_OPT_FCADDR)
> - newnport->port_id = opts->fcaddr;
> - refcount_set(&newnport->ref, 1);
> + nport = fcloop_nport_lookup(opts->wwnn, opts->wwpn);
> + if (nport && ((remoteport && nport->rport) ||
> + (!remoteport && nport->tport))) {
> + /* invalid configuration */
> + goto out_put_nport;
> + }
>
> - spin_lock_irqsave(&fcloop_lock, flags);
> + if (!nport) {
> + nport = kzalloc(sizeof(*nport), GFP_KERNEL);
> + if (!nport)
> + goto out_free_opts;
>
> - list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
> - if (tmplport->localport->node_name == opts->wwnn &&
> - tmplport->localport->port_name == opts->wwpn)
> - goto out_invalid_opts;
> + INIT_LIST_HEAD(&nport->nport_list);
> + nport->node_name = opts->wwnn;
> + nport->port_name = opts->wwpn;
> + refcount_set(&nport->ref, 1);
>
> - if (tmplport->localport->node_name == opts->lpwwnn &&
> - tmplport->localport->port_name == opts->lpwwpn)
> - lport = tmplport;
> + spin_lock_irqsave(&fcloop_lock, flags);
> + list_add_tail(&nport->nport_list, &fcloop_nports);
> + spin_unlock_irqrestore(&fcloop_lock, flags);
> }
>
Hmm. I don't really like this pattern; there is a race condition
between lookup and allocation leading to possibly duplicate entries
on the list.
Lookup and allocation really need to be under the same lock.
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
^ permalink raw reply [flat|nested] 62+ messages in thread* Re: [PATCH v3 04/18] nvmet-fcloop: refactor fcloop_nport_alloc
2025-03-18 11:02 ` Hannes Reinecke
@ 2025-03-18 13:38 ` Daniel Wagner
2025-03-18 14:10 ` Hannes Reinecke
2025-03-21 6:05 ` Christoph Hellwig
0 siblings, 2 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 13:38 UTC (permalink / raw)
To: Hannes Reinecke
Cc: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 12:02:48PM +0100, Hannes Reinecke wrote:
> > - list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
> > - if (tmplport->localport->node_name == opts->wwnn &&
> > - tmplport->localport->port_name == opts->wwpn)
> > - goto out_invalid_opts;
> > + INIT_LIST_HEAD(&nport->nport_list);
> > + nport->node_name = opts->wwnn;
> > + nport->port_name = opts->wwpn;
> > + refcount_set(&nport->ref, 1);
> > - if (tmplport->localport->node_name == opts->lpwwnn &&
> > - tmplport->localport->port_name == opts->lpwwpn)
> > - lport = tmplport;
> > + spin_lock_irqsave(&fcloop_lock, flags);
> > + list_add_tail(&nport->nport_list, &fcloop_nports);
> > + spin_unlock_irqrestore(&fcloop_lock, flags);
> > }
> Hmm. I don't really like this pattern; there is a race condition
> between lookup and allocation leading to possibly duplicate entries
> on the list.
Yes, that's not a good thing.
> Lookup and allocation really need to be under the same lock.
This means the new entry has always to be allocated first and then we
either free it again or insert into the list, because it's not possible
to allocate under the spinlock. Not that beautiful but correctness wins.
^ permalink raw reply [flat|nested] 62+ messages in thread* Re: [PATCH v3 04/18] nvmet-fcloop: refactor fcloop_nport_alloc
2025-03-18 13:38 ` Daniel Wagner
@ 2025-03-18 14:10 ` Hannes Reinecke
2025-03-21 6:05 ` Christoph Hellwig
1 sibling, 0 replies; 62+ messages in thread
From: Hannes Reinecke @ 2025-03-18 14:10 UTC (permalink / raw)
To: Daniel Wagner
Cc: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni, Keith Busch, linux-nvme, linux-kernel
On 3/18/25 14:38, Daniel Wagner wrote:
> On Tue, Mar 18, 2025 at 12:02:48PM +0100, Hannes Reinecke wrote:
>>> - list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
>>> - if (tmplport->localport->node_name == opts->wwnn &&
>>> - tmplport->localport->port_name == opts->wwpn)
>>> - goto out_invalid_opts;
>>> + INIT_LIST_HEAD(&nport->nport_list);
>>> + nport->node_name = opts->wwnn;
>>> + nport->port_name = opts->wwpn;
>>> + refcount_set(&nport->ref, 1);
>>> - if (tmplport->localport->node_name == opts->lpwwnn &&
>>> - tmplport->localport->port_name == opts->lpwwpn)
>>> - lport = tmplport;
>>> + spin_lock_irqsave(&fcloop_lock, flags);
>>> + list_add_tail(&nport->nport_list, &fcloop_nports);
>>> + spin_unlock_irqrestore(&fcloop_lock, flags);
>>> }
>> Hmm. I don't really like this pattern; there is a race condition
>> between lookup and allocation leading to possibly duplicate entries
>> on the list.
>
> Yes, that's not a good thing.
>
>> Lookup and allocation really need to be under the same lock.
>
> This means the new entry has always to be allocated first and then we
> either free it again or insert into the list, because it's not possible
> to allocate under the spinlock. Not that beautiful but correctness wins.
Allocate first, and then free it if the entry is already present.
Slightly wasteful, but that's what it is.
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
^ permalink raw reply [flat|nested] 62+ messages in thread* Re: [PATCH v3 04/18] nvmet-fcloop: refactor fcloop_nport_alloc
2025-03-18 13:38 ` Daniel Wagner
2025-03-18 14:10 ` Hannes Reinecke
@ 2025-03-21 6:05 ` Christoph Hellwig
1 sibling, 0 replies; 62+ messages in thread
From: Christoph Hellwig @ 2025-03-21 6:05 UTC (permalink / raw)
To: Daniel Wagner
Cc: Hannes Reinecke, Daniel Wagner, James Smart, Christoph Hellwig,
Sagi Grimberg, Chaitanya Kulkarni, Keith Busch, linux-nvme,
linux-kernel
On Tue, Mar 18, 2025 at 02:38:56PM +0100, Daniel Wagner wrote:
> This means the new entry has always to be allocated first and then we
> either free it again or insert into the list, because it's not possible
> to allocate under the spinlock. Not that beautiful but correctness wins.
Yes, we do that a lot. And if finding an existing entry is the more
common outcome (I don't think it is here, but I'm not sure), you do
a search first, allocate, and then search again.
^ permalink raw reply [flat|nested] 62+ messages in thread
* [PATCH v3 05/18] nvmet-fcloop: track ref counts for nports
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
` (3 preceding siblings ...)
2025-03-18 10:39 ` [PATCH v3 04/18] nvmet-fcloop: refactor fcloop_nport_alloc Daniel Wagner
@ 2025-03-18 10:39 ` Daniel Wagner
2025-03-18 11:06 ` Hannes Reinecke
2025-03-18 10:40 ` [PATCH v3 06/18] nvmet-fcloop: sync targetport removal Daniel Wagner
` (12 subsequent siblings)
17 siblings, 1 reply; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:39 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
A nport object is always used in association with targetport,
remoteport, tport and rport objects. Add explicit references for each of
the associated objects. This ensures that the nport is not removed too early
during shutdown sequences.
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/target/fcloop.c | 106 +++++++++++++++++++++++++------------------
1 file changed, 63 insertions(+), 43 deletions(-)
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 245bfe08d91ec81f1979251e8c757a0d46fd09e9..69121a5f0f280936d1b720e9e994d6e5eb9186ff 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1054,8 +1054,15 @@ static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
struct fcloop_rport *rport = remoteport->private;
+ unsigned long flags;
flush_work(&rport->ls_work);
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ rport->nport->rport = NULL;
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ /* nport ref put: rport */
fcloop_nport_put(rport->nport);
}
@@ -1063,8 +1070,15 @@ static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
struct fcloop_tport *tport = targetport->private;
+ unsigned long flags;
flush_work(&tport->ls_work);
+
+ spin_lock_irqsave(&fcloop_lock, flags);
+ tport->nport->tport = NULL;
+ spin_unlock_irqrestore(&fcloop_lock, flags);
+
+ /* nport ref put: tport */
fcloop_nport_put(tport->nport);
}
@@ -1341,6 +1355,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
struct nvme_fc_port_info pinfo;
int ret;
+ /* nport ref get: rport */
nport = fcloop_alloc_nport(buf, count, true);
if (!nport)
return -EIO;
@@ -1382,6 +1397,8 @@ __unlink_remote_port(struct fcloop_nport *nport)
{
struct fcloop_rport *rport = nport->rport;
+ lockdep_assert_held(&fcloop_lock);
+
if (rport && nport->tport)
nport->tport->remoteport = NULL;
nport->rport = NULL;
@@ -1392,9 +1409,6 @@ __unlink_remote_port(struct fcloop_nport *nport)
static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
- if (!rport)
- return -EALREADY;
-
return nvme_fc_unregister_remoteport(rport->remoteport);
}
@@ -1402,8 +1416,8 @@ static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct fcloop_nport *nport = NULL, *tmpport;
- static struct fcloop_rport *rport;
+ struct fcloop_nport *nport;
+ struct fcloop_rport *rport;
u64 nodename, portname;
unsigned long flags;
int ret;
@@ -1412,24 +1426,24 @@ fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- spin_lock_irqsave(&fcloop_lock, flags);
-
- list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
- if (tmpport->node_name == nodename &&
- tmpport->port_name == portname && tmpport->rport) {
- nport = tmpport;
- rport = __unlink_remote_port(nport);
- break;
- }
- }
+ nport = fcloop_nport_lookup(nodename, portname);
+ if (!nport)
+ return -ENOENT;
+ spin_lock_irqsave(&fcloop_lock, flags);
+ rport = __unlink_remote_port(nport);
spin_unlock_irqrestore(&fcloop_lock, flags);
- if (!nport)
- return -ENOENT;
+ if (!rport) {
+ ret = -ENOENT;
+ goto out_nport_put;
+ }
ret = __remoteport_unreg(nport, rport);
+out_nport_put:
+ fcloop_nport_put(nport);
+
return ret ? ret : count;
}
@@ -1443,6 +1457,7 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
struct nvmet_fc_port_info tinfo;
int ret;
+ /* nport ref get: tport */
nport = fcloop_alloc_nport(buf, count, false);
if (!nport)
return -EIO;
@@ -1480,6 +1495,8 @@ __unlink_target_port(struct fcloop_nport *nport)
{
struct fcloop_tport *tport = nport->tport;
+ lockdep_assert_held(&fcloop_lock);
+
if (tport && nport->rport)
nport->rport->targetport = NULL;
nport->tport = NULL;
@@ -1490,9 +1507,6 @@ __unlink_target_port(struct fcloop_nport *nport)
static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
- if (!tport)
- return -EALREADY;
-
return nvmet_fc_unregister_targetport(tport->targetport);
}
@@ -1500,8 +1514,8 @@ static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct fcloop_nport *nport = NULL, *tmpport;
- struct fcloop_tport *tport = NULL;
+ struct fcloop_nport *nport;
+ struct fcloop_tport *tport;
u64 nodename, portname;
unsigned long flags;
int ret;
@@ -1510,24 +1524,24 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- spin_lock_irqsave(&fcloop_lock, flags);
-
- list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
- if (tmpport->node_name == nodename &&
- tmpport->port_name == portname && tmpport->tport) {
- nport = tmpport;
- tport = __unlink_target_port(nport);
- break;
- }
- }
+ nport = fcloop_nport_lookup(nodename, portname);
+ if (!nport)
+ return -ENOENT;
+ spin_lock_irqsave(&fcloop_lock, flags);
+ tport = __unlink_target_port(nport);
spin_unlock_irqrestore(&fcloop_lock, flags);
- if (!nport)
- return -ENOENT;
+ if (!tport) {
+ ret = -ENOENT;
+ goto out_nport_put;
+ }
ret = __targetport_unreg(nport, tport);
+out_nport_put:
+ fcloop_nport_put(nport);
+
return ret ? ret : count;
}
@@ -1624,8 +1638,8 @@ static int __init fcloop_init(void)
static void __exit fcloop_exit(void)
{
- struct fcloop_lport *lport = NULL;
- struct fcloop_nport *nport = NULL;
+ struct fcloop_lport *lport;
+ struct fcloop_nport *nport;
struct fcloop_tport *tport;
struct fcloop_rport *rport;
unsigned long flags;
@@ -1636,7 +1650,7 @@ static void __exit fcloop_exit(void)
for (;;) {
nport = list_first_entry_or_null(&fcloop_nports,
typeof(*nport), nport_list);
- if (!nport)
+ if (!nport || !fcloop_nport_get(nport))
break;
tport = __unlink_target_port(nport);
@@ -1644,13 +1658,19 @@ static void __exit fcloop_exit(void)
spin_unlock_irqrestore(&fcloop_lock, flags);
- ret = __targetport_unreg(nport, tport);
- if (ret)
- pr_warn("%s: Failed deleting target port\n", __func__);
+ if (tport) {
+ ret = __targetport_unreg(nport, tport);
+ if (ret)
+ pr_warn("%s: Failed deleting target port\n", __func__);
+ }
- ret = __remoteport_unreg(nport, rport);
- if (ret)
- pr_warn("%s: Failed deleting remote port\n", __func__);
+ if (rport) {
+ ret = __remoteport_unreg(nport, rport);
+ if (ret)
+ pr_warn("%s: Failed deleting remote port\n", __func__);
+ }
+
+ fcloop_nport_put(nport);
spin_lock_irqsave(&fcloop_lock, flags);
}
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* Re: [PATCH v3 05/18] nvmet-fcloop: track ref counts for nports
2025-03-18 10:39 ` [PATCH v3 05/18] nvmet-fcloop: track ref counts for nports Daniel Wagner
@ 2025-03-18 11:06 ` Hannes Reinecke
2025-03-18 13:44 ` Daniel Wagner
0 siblings, 1 reply; 62+ messages in thread
From: Hannes Reinecke @ 2025-03-18 11:06 UTC (permalink / raw)
To: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni
Cc: Keith Busch, linux-nvme, linux-kernel
On 3/18/25 11:39, Daniel Wagner wrote:
> A nport object is always used in association with targerport,
> remoteport, tport and rport objects. Add explicit references for any of
> the associated object. This ensures that nport is not removed too early
> on shutdown sequences.
>
> Signed-off-by: Daniel Wagner <wagi@kernel.org>
> ---
> drivers/nvme/target/fcloop.c | 106 +++++++++++++++++++++++++------------------
> 1 file changed, 63 insertions(+), 43 deletions(-)
>
> diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
> index 245bfe08d91ec81f1979251e8c757a0d46fd09e9..69121a5f0f280936d1b720e9e994d6e5eb9186ff 100644
> --- a/drivers/nvme/target/fcloop.c
> +++ b/drivers/nvme/target/fcloop.c
> @@ -1054,8 +1054,15 @@ static void
> fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
> {
> struct fcloop_rport *rport = remoteport->private;
> + unsigned long flags;
>
> flush_work(&rport->ls_work);
> +
> + spin_lock_irqsave(&fcloop_lock, flags);
> + rport->nport->rport = NULL;
> + spin_unlock_irqrestore(&fcloop_lock, flags);
> +
> + /* nport ref put: rport */
> fcloop_nport_put(rport->nport);
> }
>
The comment is a bit odd; obviously fcloop_nport_put() puts the nport
reference for the rport.
Maybe just remove them?
Otherwise looks good.
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
^ permalink raw reply [flat|nested] 62+ messages in thread* Re: [PATCH v3 05/18] nvmet-fcloop: track ref counts for nports
2025-03-18 11:06 ` Hannes Reinecke
@ 2025-03-18 13:44 ` Daniel Wagner
0 siblings, 0 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 13:44 UTC (permalink / raw)
To: Hannes Reinecke
Cc: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 12:06:54PM +0100, Hannes Reinecke wrote:
> On 3/18/25 11:39, Daniel Wagner wrote:
> > A nport object is always used in association with targerport,
> > remoteport, tport and rport objects. Add explicit references for any of
> > the associated object. This ensures that nport is not removed too early
> > on shutdown sequences.
> >
> > Signed-off-by: Daniel Wagner <wagi@kernel.org>
> > ---
> > drivers/nvme/target/fcloop.c | 106 +++++++++++++++++++++++++------------------
> > 1 file changed, 63 insertions(+), 43 deletions(-)
> >
> > diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
> > index 245bfe08d91ec81f1979251e8c757a0d46fd09e9..69121a5f0f280936d1b720e9e994d6e5eb9186ff 100644
> > --- a/drivers/nvme/target/fcloop.c
> > +++ b/drivers/nvme/target/fcloop.c
> > @@ -1054,8 +1054,15 @@ static void
> > fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
> > {
> > struct fcloop_rport *rport = remoteport->private;
> > + unsigned long flags;
> > flush_work(&rport->ls_work);
> > +
> > + spin_lock_irqsave(&fcloop_lock, flags);
> > + rport->nport->rport = NULL;
> > + spin_unlock_irqrestore(&fcloop_lock, flags);
> > +
> > + /* nport ref put: rport */
> > fcloop_nport_put(rport->nport);
> > }
> The comment is a bit odd; obviously fcloop_nport_put() puts the nport
> reference for the rport.
> Maybe just remove them?
Sure, I left it in to figure out which of the get/put calls pair up. This was
not so clear during the various stages of this series. Now that only
rport->nport and tport->nport are left, it's indeed a bit useless.
^ permalink raw reply [flat|nested] 62+ messages in thread
* [PATCH v3 06/18] nvmet-fcloop: sync targetport removal
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
` (4 preceding siblings ...)
2025-03-18 10:39 ` [PATCH v3 05/18] nvmet-fcloop: track ref counts for nports Daniel Wagner
@ 2025-03-18 10:40 ` Daniel Wagner
2025-03-18 11:07 ` Hannes Reinecke
2025-03-21 6:08 ` Christoph Hellwig
2025-03-18 10:40 ` [PATCH v3 07/18] nvmet-fcloop: update refs on tfcp_req Daniel Wagner
` (11 subsequent siblings)
17 siblings, 2 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:40 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
The nvmet-fc code uses references on the targetport to ensure no UAFs
happen. The consequence is that when the targetport is unregistered,
not all resources are freed immediately. Ensure that all activities from
the unregister call have been submitted (deassociation) before
continuing with the shutdown sequence.
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/target/fcloop.c | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 69121a5f0f280936d1b720e9e994d6e5eb9186ff..cddaa424bb3ff62156cef14c787fdcb33c15d76e 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -239,6 +239,7 @@ struct fcloop_nport {
struct fcloop_rport *rport;
struct fcloop_tport *tport;
struct fcloop_lport *lport;
+ struct completion tport_unreg_done;
struct list_head nport_list;
refcount_t ref;
u64 node_name;
@@ -1078,6 +1079,8 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
tport->nport->tport = NULL;
spin_unlock_irqrestore(&fcloop_lock, flags);
+ complete(&tport->nport->tport_unreg_done);
+
/* nport ref put: tport */
fcloop_nport_put(tport->nport);
}
@@ -1507,7 +1510,17 @@ __unlink_target_port(struct fcloop_nport *nport)
static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
- return nvmet_fc_unregister_targetport(tport->targetport);
+ int ret;
+
+ init_completion(&nport->tport_unreg_done);
+
+ ret = nvmet_fc_unregister_targetport(tport->targetport);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&nport->tport_unreg_done);
+
+ return 0;
}
static ssize_t
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* Re: [PATCH v3 06/18] nvmet-fcloop: sync targetport removal
2025-03-18 10:40 ` [PATCH v3 06/18] nvmet-fcloop: sync targetport removal Daniel Wagner
@ 2025-03-18 11:07 ` Hannes Reinecke
2025-03-21 6:08 ` Christoph Hellwig
1 sibling, 0 replies; 62+ messages in thread
From: Hannes Reinecke @ 2025-03-18 11:07 UTC (permalink / raw)
To: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni
Cc: Keith Busch, linux-nvme, linux-kernel
On 3/18/25 11:40, Daniel Wagner wrote:
> The nvmet-fc uses references on the targetport to ensure no UAFs
> happens. The consequence is that when the targetport is unregistered,
> not all resources are freed immediately. Ensure that all activities from
> the unregister call have been submitted (deassocication) before
> continuing with the shutdown sequence.
>
> Signed-off-by: Daniel Wagner <wagi@kernel.org>
> ---
> drivers/nvme/target/fcloop.c | 15 ++++++++++++++-
> 1 file changed, 14 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
> index 69121a5f0f280936d1b720e9e994d6e5eb9186ff..cddaa424bb3ff62156cef14c787fdcb33c15d76e 100644
> --- a/drivers/nvme/target/fcloop.c
> +++ b/drivers/nvme/target/fcloop.c
> @@ -239,6 +239,7 @@ struct fcloop_nport {
> struct fcloop_rport *rport;
> struct fcloop_tport *tport;
> struct fcloop_lport *lport;
> + struct completion tport_unreg_done;
> struct list_head nport_list;
> refcount_t ref;
> u64 node_name;
> @@ -1078,6 +1079,8 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
> tport->nport->tport = NULL;
> spin_unlock_irqrestore(&fcloop_lock, flags);
>
> + complete(&tport->nport->tport_unreg_done);
> +
> /* nport ref put: tport */
> fcloop_nport_put(tport->nport);
> }
> @@ -1507,7 +1510,17 @@ __unlink_target_port(struct fcloop_nport *nport)
> static int
> __targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
> {
> - return nvmet_fc_unregister_targetport(tport->targetport);
> + int ret;
> +
> + init_completion(&nport->tport_unreg_done);
> +
> + ret = nvmet_fc_unregister_targetport(tport->targetport);
> + if (ret)
> + return ret;
> +
> + wait_for_completion(&nport->tport_unreg_done);
> +
> + return 0;
> }
>
> static ssize_t
>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
^ permalink raw reply [flat|nested] 62+ messages in thread* Re: [PATCH v3 06/18] nvmet-fcloop: sync targetport removal
2025-03-18 10:40 ` [PATCH v3 06/18] nvmet-fcloop: sync targetport removal Daniel Wagner
2025-03-18 11:07 ` Hannes Reinecke
@ 2025-03-21 6:08 ` Christoph Hellwig
2025-04-02 16:39 ` Daniel Wagner
1 sibling, 1 reply; 62+ messages in thread
From: Christoph Hellwig @ 2025-03-21 6:08 UTC (permalink / raw)
To: Daniel Wagner
Cc: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 11:40:00AM +0100, Daniel Wagner wrote:
> The nvmet-fc uses references on the targetport to ensure no UAFs
> happens. The consequence is that when the targetport is unregistered,
> not all resources are freed immediately. Ensure that all activities from
> the unregister call have been submitted (deassocication) before
> continuing with the shutdown sequence.
This needs to explain why that is needed. In general a completion
waiting for references to go away is a bit of an anti-pattern, so
it should come with a good excuse.
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [PATCH v3 06/18] nvmet-fcloop: sync targetport removal
2025-03-21 6:08 ` Christoph Hellwig
@ 2025-04-02 16:39 ` Daniel Wagner
0 siblings, 0 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-04-02 16:39 UTC (permalink / raw)
To: Christoph Hellwig
Cc: Daniel Wagner, James Smart, Sagi Grimberg, Chaitanya Kulkarni,
Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel
On Fri, Mar 21, 2025 at 07:08:32AM +0100, Christoph Hellwig wrote:
> On Tue, Mar 18, 2025 at 11:40:00AM +0100, Daniel Wagner wrote:
> > The nvmet-fc uses references on the targetport to ensure no UAFs
> > happens. The consequence is that when the targetport is unregistered,
> > not all resources are freed immediately. Ensure that all activities from
> > the unregister call have been submitted (deassocication) before
> > continuing with the shutdown sequence.
>
> This needs to explain why that is needed. In general a completion
> waiting for references to go away is a bit of an anti-patter, so
> it should come with a good excuse.
FWIW, I was able to drop this patch and also the similar code for the
lport object. The reason is that with the 'nvmet-fcloop: allocate/free
fcloop_lsreq directly' patch this is no longer necessary.
^ permalink raw reply [flat|nested] 62+ messages in thread
* [PATCH v3 07/18] nvmet-fcloop: update refs on tfcp_req
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
` (5 preceding siblings ...)
2025-03-18 10:40 ` [PATCH v3 06/18] nvmet-fcloop: sync targetport removal Daniel Wagner
@ 2025-03-18 10:40 ` Daniel Wagner
2025-03-18 11:09 ` Hannes Reinecke
2025-03-21 6:08 ` Christoph Hellwig
2025-03-18 10:40 ` [PATCH v3 08/18] nvmet-fcloop: add missing fcloop_callback_host_done Daniel Wagner
` (10 subsequent siblings)
17 siblings, 2 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:40 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
Track the lifetime of the in-flight tfcp_req to ensure
the object is not freed too early.
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/target/fcloop.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index cddaa424bb3ff62156cef14c787fdcb33c15d76e..cadf081e3653c641b0afcb0968fc74299ab941d1 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -566,7 +566,8 @@ fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
}
/* release original io reference on tgt struct */
- fcloop_tfcp_req_put(tfcp_req);
+ if (tfcp_req)
+ fcloop_tfcp_req_put(tfcp_req);
}
static bool drop_fabric_opcode;
@@ -671,6 +672,7 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
break;
default:
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ fcloop_tfcp_req_put(tfcp_req);
WARN_ON(1);
return;
}
@@ -958,8 +960,10 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
spin_lock(&inireq->inilock);
tfcp_req = inireq->tfcp_req;
- if (tfcp_req)
- fcloop_tfcp_req_get(tfcp_req);
+ if (tfcp_req) {
+ if (!fcloop_tfcp_req_get(tfcp_req))
+ tfcp_req = NULL;
+ }
spin_unlock(&inireq->inilock);
if (!tfcp_req)
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* Re: [PATCH v3 07/18] nvmet-fcloop: update refs on tfcp_req
2025-03-18 10:40 ` [PATCH v3 07/18] nvmet-fcloop: update refs on tfcp_req Daniel Wagner
@ 2025-03-18 11:09 ` Hannes Reinecke
2025-03-21 6:08 ` Christoph Hellwig
1 sibling, 0 replies; 62+ messages in thread
From: Hannes Reinecke @ 2025-03-18 11:09 UTC (permalink / raw)
To: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni
Cc: Keith Busch, linux-nvme, linux-kernel
On 3/18/25 11:40, Daniel Wagner wrote:
> Track the lifetime of the in-flight tfcp_req to ensure
> the object is not freed too early.
>
> Signed-off-by: Daniel Wagner <wagi@kernel.org>
> ---
> drivers/nvme/target/fcloop.c | 10 +++++++---
> 1 file changed, 7 insertions(+), 3 deletions(-)
>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [PATCH v3 07/18] nvmet-fcloop: update refs on tfcp_req
2025-03-18 10:40 ` [PATCH v3 07/18] nvmet-fcloop: update refs on tfcp_req Daniel Wagner
2025-03-18 11:09 ` Hannes Reinecke
@ 2025-03-21 6:08 ` Christoph Hellwig
1 sibling, 0 replies; 62+ messages in thread
From: Christoph Hellwig @ 2025-03-21 6:08 UTC (permalink / raw)
To: Daniel Wagner
Cc: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel
Looks good:
Reviewed-by: Christoph Hellwig <hch@lst.de>
^ permalink raw reply [flat|nested] 62+ messages in thread
* [PATCH v3 08/18] nvmet-fcloop: add missing fcloop_callback_host_done
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
` (6 preceding siblings ...)
2025-03-18 10:40 ` [PATCH v3 07/18] nvmet-fcloop: update refs on tfcp_req Daniel Wagner
@ 2025-03-18 10:40 ` Daniel Wagner
2025-03-18 11:12 ` Hannes Reinecke
2025-03-21 6:12 ` Christoph Hellwig
2025-03-18 10:40 ` [PATCH v3 09/18] nvmet-fcloop: prevent double port deletion Daniel Wagner
` (9 subsequent siblings)
17 siblings, 2 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:40 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
Add the missing fcloop_call_host_done calls so that the caller
frees resources when something goes wrong.
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/target/fcloop.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index cadf081e3653c641b0afcb0968fc74299ab941d1..de23f0bc5599b6f8dd5c3713dd38c952e6fdda28 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -966,9 +966,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
}
spin_unlock(&inireq->inilock);
- if (!tfcp_req)
+ if (!tfcp_req) {
/* abort has already been called */
+ fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
return;
+ }
/* break initiator/target relationship for io */
spin_lock_irqsave(&tfcp_req->reqlock, flags);
@@ -982,6 +984,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
break;
default:
spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
WARN_ON(1);
return;
}
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* Re: [PATCH v3 08/18] nvmet-fcloop: add missing fcloop_callback_host_done
2025-03-18 10:40 ` [PATCH v3 08/18] nvmet-fcloop: add missing fcloop_callback_host_done Daniel Wagner
@ 2025-03-18 11:12 ` Hannes Reinecke
2025-03-18 13:49 ` Daniel Wagner
2025-03-21 6:12 ` Christoph Hellwig
1 sibling, 1 reply; 62+ messages in thread
From: Hannes Reinecke @ 2025-03-18 11:12 UTC (permalink / raw)
To: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni
Cc: Keith Busch, linux-nvme, linux-kernel
On 3/18/25 11:40, Daniel Wagner wrote:
> Add the missing fcloop_call_host_done calls so that the caller
> frees resources when something goes wrong.
>
> Signed-off-by: Daniel Wagner <wagi@kernel.org>
> ---
> drivers/nvme/target/fcloop.c | 5 ++++-
> 1 file changed, 4 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
> index cadf081e3653c641b0afcb0968fc74299ab941d1..de23f0bc5599b6f8dd5c3713dd38c952e6fdda28 100644
> --- a/drivers/nvme/target/fcloop.c
> +++ b/drivers/nvme/target/fcloop.c
> @@ -966,9 +966,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
> }
> spin_unlock(&inireq->inilock);
>
> - if (!tfcp_req)
> + if (!tfcp_req) {
> /* abort has already been called */
> + fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
Am I misreading things or will fcloop_call_host_done() crash on a NULL
tfcp_req ?
In patch 3 fcloop_tfcp_req_put() doesn't check for a NULL argument...
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
^ permalink raw reply [flat|nested] 62+ messages in thread* Re: [PATCH v3 08/18] nvmet-fcloop: add missing fcloop_callback_host_done
2025-03-18 11:12 ` Hannes Reinecke
@ 2025-03-18 13:49 ` Daniel Wagner
2025-04-02 17:08 ` Daniel Wagner
0 siblings, 1 reply; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 13:49 UTC (permalink / raw)
To: Hannes Reinecke
Cc: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 12:12:52PM +0100, Hannes Reinecke wrote:
> On 3/18/25 11:40, Daniel Wagner wrote:
> > Add the missing fcloop_call_host_done calls so that the caller
> > frees resources when something goes wrong.
> >
> > Signed-off-by: Daniel Wagner <wagi@kernel.org>
> > ---
> > drivers/nvme/target/fcloop.c | 5 ++++-
> > 1 file changed, 4 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
> > index cadf081e3653c641b0afcb0968fc74299ab941d1..de23f0bc5599b6f8dd5c3713dd38c952e6fdda28 100644
> > --- a/drivers/nvme/target/fcloop.c
> > +++ b/drivers/nvme/target/fcloop.c
> > @@ -966,9 +966,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
> > }
> > spin_unlock(&inireq->inilock);
> > - if (!tfcp_req)
> > + if (!tfcp_req) {
> > /* abort has already been called */
> > + fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
>
> Am I misreading things or will fcloop_call_host_done() crash on a NULL
> tfcp_req ?
>
> In patch 3 fcloop_tfcp_req_put() doesn't check for a NULL argument...
There is a NULL pointer check in fcloop_call_host_done eventually. It is
added in 'nvmet-fcloop: update refs on tfcp_req'. That hunk should be in this
patch though.
^ permalink raw reply [flat|nested] 62+ messages in thread* Re: [PATCH v3 08/18] nvmet-fcloop: add missing fcloop_callback_host_done
2025-03-18 13:49 ` Daniel Wagner
@ 2025-04-02 17:08 ` Daniel Wagner
2025-04-03 13:25 ` Daniel Wagner
0 siblings, 1 reply; 62+ messages in thread
From: Daniel Wagner @ 2025-04-02 17:08 UTC (permalink / raw)
To: Hannes Reinecke
Cc: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 02:49:14PM +0100, Daniel Wagner wrote:
> On Tue, Mar 18, 2025 at 12:12:52PM +0100, Hannes Reinecke wrote:
> > On 3/18/25 11:40, Daniel Wagner wrote:
> > > Add the missing fcloop_call_host_done calls so that the caller
> > > frees resources when something goes wrong.
> > >
> > > Signed-off-by: Daniel Wagner <wagi@kernel.org>
> > > ---
> > > drivers/nvme/target/fcloop.c | 5 ++++-
> > > 1 file changed, 4 insertions(+), 1 deletion(-)
> > >
> > > diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
> > > index cadf081e3653c641b0afcb0968fc74299ab941d1..de23f0bc5599b6f8dd5c3713dd38c952e6fdda28 100644
> > > --- a/drivers/nvme/target/fcloop.c
> > > +++ b/drivers/nvme/target/fcloop.c
> > > @@ -966,9 +966,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
> > > }
> > > spin_unlock(&inireq->inilock);
> > > - if (!tfcp_req)
> > > + if (!tfcp_req) {
> > > /* abort has already been called */
> > > + fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
> >
> > Am I misreading things or will fcloop_call_host_done() crash on a NULL
> > tfcp_req ?
> >
> > In patch 3 fcloop_tfcp_req_put() doesn't check for a NULL argument...
>
> There is NULL pointer check in fcloop_call_host_done eventually. It is
> in 'nvmet-fcloop: update refs on tfcp_req'. That hunk should be in this
> patch though.
Looking again with fresh eyes. Patch #3 is adding ref counting to the
lport. 'nvmet-fcloop: update refs on tfcp_req' (the patch before this
one) adds the NULL check. Nothing will crash here. Actually, I've run
into this crash when testing before the NULL check was there :)
^ permalink raw reply [flat|nested] 62+ messages in thread* Re: [PATCH v3 08/18] nvmet-fcloop: add missing fcloop_callback_host_done
2025-04-02 17:08 ` Daniel Wagner
@ 2025-04-03 13:25 ` Daniel Wagner
2025-04-04 7:28 ` Daniel Wagner
0 siblings, 1 reply; 62+ messages in thread
From: Daniel Wagner @ 2025-04-03 13:25 UTC (permalink / raw)
To: Hannes Reinecke
Cc: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni, Keith Busch, linux-nvme, linux-kernel
On Wed, Apr 02, 2025 at 07:08:06PM +0200, Daniel Wagner wrote:
> > There is NULL pointer check in fcloop_call_host_done eventually. It is
> > in 'nvmet-fcloop: update refs on tfcp_req'. That hunk should be in this
> > patch though.
>
> Looking again with fresh eyes. Patch #3 is adding ref counting to the
> lport. 'nvmet-fcloop: update refs on tfcp_req' (the patch before this
> one) adds the NULL check. Nothing will crash here. Actually, I've run
> into this crash when testing before the NULL check was there :)
After a bit more testing and a new KASAN report, it looks like the
lifetime tracking for tfcp_req/fcpreq is yet again a bit off. The whole
conditional free/put indicates that something is wrong IMO. Let me see if I
can resolve this a bit more cleanly.
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [PATCH v3 08/18] nvmet-fcloop: add missing fcloop_callback_host_done
2025-04-03 13:25 ` Daniel Wagner
@ 2025-04-04 7:28 ` Daniel Wagner
0 siblings, 0 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-04-04 7:28 UTC (permalink / raw)
To: Hannes Reinecke
Cc: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni, Keith Busch, linux-nvme, linux-kernel
On Thu, Apr 03, 2025 at 03:25:48PM +0200, Daniel Wagner wrote:
> On Wed, Apr 02, 2025 at 07:08:06PM +0200, Daniel Wagner wrote:
> > > There is NULL pointer check in fcloop_call_host_done eventually. It is
> > > in 'nvmet-fcloop: update refs on tfcp_req'. That hunk should be in this
> > > patch though.
> >
> > Looking again with fresh eyes. Patch #3 is adding ref counting to the
> > lport. 'nvmet-fcloop: update refs on tfcp_req' (the patch before this
> > one) adds the NULL check. Nothing will crash here. Actually, I've run
> > into this crash when testing before the NULL check was there :)
>
> After a bit more testing and a new KASAN report, it looks like yet
> another lifetime tracking for tfcp_req/fcpreq is a bit off. The whole
> conditional free/put indicates that something is wrong IMO. Let me see if I
> can resolve this a bit cleaner.
I found the issue which caused KASAN being unhappy. There is a state
machine for the fcp request state (active/idle/completed/aborted) which
got out of sync. I didn't want to change everything at this stage just
for the sake of refactoring. The conditional frees are still there.
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [PATCH v3 08/18] nvmet-fcloop: add missing fcloop_callback_host_done
2025-03-18 10:40 ` [PATCH v3 08/18] nvmet-fcloop: add missing fcloop_callback_host_done Daniel Wagner
2025-03-18 11:12 ` Hannes Reinecke
@ 2025-03-21 6:12 ` Christoph Hellwig
1 sibling, 0 replies; 62+ messages in thread
From: Christoph Hellwig @ 2025-03-21 6:12 UTC (permalink / raw)
To: Daniel Wagner
Cc: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 11:40:02AM +0100, Daniel Wagner wrote:
> - if (!tfcp_req)
> + if (!tfcp_req) {
> /* abort has already been called */
> + fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
> return;
> + }
>
> /* break initiator/target relationship for io */
> spin_lock_irqsave(&tfcp_req->reqlock, flags);
> @@ -982,6 +984,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
> break;
> default:
> spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
> + fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
Maybe share this using a goto?
^ permalink raw reply [flat|nested] 62+ messages in thread
* [PATCH v3 09/18] nvmet-fcloop: prevent double port deletion
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
` (7 preceding siblings ...)
2025-03-18 10:40 ` [PATCH v3 08/18] nvmet-fcloop: add missing fcloop_callback_host_done Daniel Wagner
@ 2025-03-18 10:40 ` Daniel Wagner
2025-03-18 11:15 ` Hannes Reinecke
2025-03-21 6:13 ` Christoph Hellwig
2025-03-18 10:40 ` [PATCH v3 10/18] nvmet-fcloop: allocate/free fcloop_lsreq directly Daniel Wagner
` (8 subsequent siblings)
17 siblings, 2 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:40 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
The delete callback can be called either via the unregister function or
from the transport directly. Thus it is necessary to ensure resources are
not freed multiple times.
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/target/fcloop.c | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index de23f0bc5599b6f8dd5c3713dd38c952e6fdda28..06f42da6a0335c53ae319133119d057aab12e07e 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -215,6 +215,8 @@ struct fcloop_lport_priv {
struct fcloop_lport *lport;
};
+#define PORT_DELETE 0
+
struct fcloop_rport {
struct nvme_fc_remote_port *remoteport;
struct nvmet_fc_target_port *targetport;
@@ -223,6 +225,7 @@ struct fcloop_rport {
spinlock_t lock;
struct list_head ls_list;
struct work_struct ls_work;
+ unsigned long flags;
};
struct fcloop_tport {
@@ -233,6 +236,7 @@ struct fcloop_tport {
spinlock_t lock;
struct list_head ls_list;
struct work_struct ls_work;
+ unsigned long flags;
};
struct fcloop_nport {
@@ -1062,14 +1066,20 @@ static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
struct fcloop_rport *rport = remoteport->private;
+ bool delete_port = true;
unsigned long flags;
flush_work(&rport->ls_work);
spin_lock_irqsave(&fcloop_lock, flags);
+ if (test_and_set_bit(PORT_DELETE, &rport->flags))
+ delete_port = false;
rport->nport->rport = NULL;
spin_unlock_irqrestore(&fcloop_lock, flags);
+ if (!delete_port)
+ return;
+
/* nport ref put: rport */
fcloop_nport_put(rport->nport);
}
@@ -1078,14 +1088,20 @@ static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
struct fcloop_tport *tport = targetport->private;
+ bool delete_port = true;
unsigned long flags;
flush_work(&tport->ls_work);
spin_lock_irqsave(&fcloop_lock, flags);
+ if (test_and_set_bit(PORT_DELETE, &tport->flags))
+ delete_port = false;
tport->nport->tport = NULL;
spin_unlock_irqrestore(&fcloop_lock, flags);
+ if (!delete_port)
+ return;
+
complete(&tport->nport->tport_unreg_done);
/* nport ref put: tport */
@@ -1394,6 +1410,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
rport->nport = nport;
rport->lport = nport->lport;
nport->rport = rport;
+ rport->flags = 0;
spin_lock_init(&rport->lock);
INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
INIT_LIST_HEAD(&rport->ls_list);
@@ -1492,6 +1509,7 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
tport->nport = nport;
tport->lport = nport->lport;
nport->tport = tport;
+ tport->flags = 0;
spin_lock_init(&tport->lock);
INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
INIT_LIST_HEAD(&tport->ls_list);
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* Re: [PATCH v3 09/18] nvmet-fcloop: prevent double port deletion
2025-03-18 10:40 ` [PATCH v3 09/18] nvmet-fcloop: prevent double port deletion Daniel Wagner
@ 2025-03-18 11:15 ` Hannes Reinecke
2025-03-18 13:55 ` Daniel Wagner
2025-03-21 6:13 ` Christoph Hellwig
1 sibling, 1 reply; 62+ messages in thread
From: Hannes Reinecke @ 2025-03-18 11:15 UTC (permalink / raw)
To: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni
Cc: Keith Busch, linux-nvme, linux-kernel
On 3/18/25 11:40, Daniel Wagner wrote:
> The delete callback can be called either via the unregister function or
> from the transport directly. Thus it is necessary to ensure resources are
> not freed multiple times.
>
> Signed-off-by: Daniel Wagner <wagi@kernel.org>
> ---
> drivers/nvme/target/fcloop.c | 18 ++++++++++++++++++
> 1 file changed, 18 insertions(+)
>
> diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
> index de23f0bc5599b6f8dd5c3713dd38c952e6fdda28..06f42da6a0335c53ae319133119d057aab12e07e 100644
> --- a/drivers/nvme/target/fcloop.c
> +++ b/drivers/nvme/target/fcloop.c
> @@ -215,6 +215,8 @@ struct fcloop_lport_priv {
> struct fcloop_lport *lport;
> };
>
> +#define PORT_DELETE 0
> +
> struct fcloop_rport {
> struct nvme_fc_remote_port *remoteport;
> struct nvmet_fc_target_port *targetport;
> @@ -223,6 +225,7 @@ struct fcloop_rport {
> spinlock_t lock;
> struct list_head ls_list;
> struct work_struct ls_work;
> + unsigned long flags;
> };
>
> struct fcloop_tport {
> @@ -233,6 +236,7 @@ struct fcloop_tport {
> spinlock_t lock;
> struct list_head ls_list;
> struct work_struct ls_work;
> + unsigned long flags;
> };
>
> struct fcloop_nport {
> @@ -1062,14 +1066,20 @@ static void
> fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
> {
> struct fcloop_rport *rport = remoteport->private;
> + bool delete_port = true;
> unsigned long flags;
>
> flush_work(&rport->ls_work);
>
> spin_lock_irqsave(&fcloop_lock, flags);
> + if (test_and_set_bit(PORT_DELETE, &rport->flags))
> + delete_port = false;
> rport->nport->rport = NULL;
> spin_unlock_irqrestore(&fcloop_lock, flags);
>
Can't you just check for a NULL rport->nport->rport pointer
and do away with the PORT_DELETE flag?
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
^ permalink raw reply [flat|nested] 62+ messages in thread* Re: [PATCH v3 09/18] nvmet-fcloop: prevent double port deletion
2025-03-18 11:15 ` Hannes Reinecke
@ 2025-03-18 13:55 ` Daniel Wagner
0 siblings, 0 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 13:55 UTC (permalink / raw)
To: Hannes Reinecke
Cc: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 12:15:04PM +0100, Hannes Reinecke wrote:
> > fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
> > {
> > struct fcloop_rport *rport = remoteport->private;
> > + bool delete_port = true;
> > unsigned long flags;
> > flush_work(&rport->ls_work);
> > spin_lock_irqsave(&fcloop_lock, flags);
> > + if (test_and_set_bit(PORT_DELETE, &rport->flags))
> > + delete_port = false;
> > rport->nport->rport = NULL;
> > spin_unlock_irqrestore(&fcloop_lock, flags);
> Can't you just check for a NULL rport->nport->rport pointer
> and do away with the PORT_DELETE flag?
Unfortunately, nport->rport is also set to NULL in __unlink_remote_port
and __unlink_target_port. If we would just update the pointer here, it
would be possible.
I played a bit around when to clear the nport->rport pointer but it
didn't work. There were always some UAFs or NULL pointer accesses. With
the flags I was able to get it fixed. I am not insisting on this
solution, just trying to explain why I choosed it.
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [PATCH v3 09/18] nvmet-fcloop: prevent double port deletion
2025-03-18 10:40 ` [PATCH v3 09/18] nvmet-fcloop: prevent double port deletion Daniel Wagner
2025-03-18 11:15 ` Hannes Reinecke
@ 2025-03-21 6:13 ` Christoph Hellwig
1 sibling, 0 replies; 62+ messages in thread
From: Christoph Hellwig @ 2025-03-21 6:13 UTC (permalink / raw)
To: Daniel Wagner
Cc: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 11:40:03AM +0100, Daniel Wagner wrote:
> };
>
> +#define PORT_DELETE 0
The way I read the code this should be PORT_DELETED?
Also maybe add a little comment, once we grow more flags that really
helps.
^ permalink raw reply [flat|nested] 62+ messages in thread
* [PATCH v3 10/18] nvmet-fcloop: allocate/free fcloop_lsreq directly
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
` (8 preceding siblings ...)
2025-03-18 10:40 ` [PATCH v3 09/18] nvmet-fcloop: prevent double port deletion Daniel Wagner
@ 2025-03-18 10:40 ` Daniel Wagner
2025-03-18 11:17 ` Hannes Reinecke
2025-03-19 22:47 ` James Smart
2025-03-18 10:40 ` [PATCH v3 11/18] nvmet-fc: inline nvmet_fc_delete_assoc Daniel Wagner
` (7 subsequent siblings)
17 siblings, 2 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:40 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
fcloop depends on the host or the target to allocate the fcloop_lsreq
object. This means that the lifetime of the fcloop_lsreq is tied to
either the host or the target. Consequently, the host or the target must
cooperate during shutdown.
Unfortunately, this approach does not work well when the target forces a
shutdown, as there are dependencies that are difficult to resolve in a
clean way.
The simplest solution is to decouple the lifetime of the fcloop_lsreq
object by managing them directly within fcloop. Since this is not a
performance-critical path and only a small number of LS objects are used
during setup and cleanup, it does not significantly impact performance
to allocate them during normal operation.
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/target/fcloop.c | 53 +++++++++++++++++++++++++++++---------------
1 file changed, 35 insertions(+), 18 deletions(-)
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 06f42da6a0335c53ae319133119d057aab12e07e..537fc6533a4cf5d39855cf850b82af739eeb3056 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -342,6 +342,7 @@ fcloop_rport_lsrqst_work(struct work_struct *work)
* callee may free memory containing tls_req.
* do not reference lsreq after this.
*/
+ kfree(tls_req);
spin_lock(&rport->lock);
}
@@ -353,10 +354,13 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *remoteport,
struct nvmefc_ls_req *lsreq)
{
- struct fcloop_lsreq *tls_req = lsreq->private;
struct fcloop_rport *rport = remoteport->private;
+ struct fcloop_lsreq *tls_req;
int ret = 0;
+ tls_req = kmalloc(sizeof(*tls_req), GFP_KERNEL);
+ if (!tls_req)
+ return -ENOMEM;
tls_req->lsreq = lsreq;
INIT_LIST_HEAD(&tls_req->ls_list);
@@ -387,19 +391,23 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
struct nvme_fc_remote_port *remoteport = tport->remoteport;
struct fcloop_rport *rport;
+
+ if (!remoteport) {
+ kfree(tls_req);
+ return -ECONNREFUSED;
+ }
+
memcpy(lsreq->rspaddr, lsrsp->rspbuf,
((lsreq->rsplen < lsrsp->rsplen) ?
lsreq->rsplen : lsrsp->rsplen));
lsrsp->done(lsrsp);
- if (remoteport) {
- rport = remoteport->private;
- spin_lock(&rport->lock);
- list_add_tail(&tls_req->ls_list, &rport->ls_list);
- spin_unlock(&rport->lock);
- queue_work(nvmet_wq, &rport->ls_work);
- }
+ rport = remoteport->private;
+ spin_lock(&rport->lock);
+ list_add_tail(&tls_req->ls_list, &rport->ls_list);
+ spin_unlock(&rport->lock);
+ queue_work(nvmet_wq, &rport->ls_work);
return 0;
}
@@ -426,6 +434,7 @@ fcloop_tport_lsrqst_work(struct work_struct *work)
* callee may free memory containing tls_req.
* do not reference lsreq after this.
*/
+ kfree(tls_req);
spin_lock(&tport->lock);
}
@@ -436,8 +445,8 @@ static int
fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
struct nvmefc_ls_req *lsreq)
{
- struct fcloop_lsreq *tls_req = lsreq->private;
struct fcloop_tport *tport = targetport->private;
+ struct fcloop_lsreq *tls_req;
int ret = 0;
/*
@@ -445,6 +454,10 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
* hosthandle ignored as fcloop currently is
* 1:1 tgtport vs remoteport
*/
+
+ tls_req = kmalloc(sizeof(*tls_req), GFP_KERNEL);
+ if (!tls_req)
+ return -ENOMEM;
tls_req->lsreq = lsreq;
INIT_LIST_HEAD(&tls_req->ls_list);
@@ -461,6 +474,9 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
lsreq->rqstaddr, lsreq->rqstlen);
+ if (ret)
+ kfree(tls_req);
+
return ret;
}
@@ -475,18 +491,21 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
struct nvmet_fc_target_port *targetport = rport->targetport;
struct fcloop_tport *tport;
+ if (!targetport) {
+ kfree(tls_req);
+ return -ECONNREFUSED;
+ }
+
memcpy(lsreq->rspaddr, lsrsp->rspbuf,
((lsreq->rsplen < lsrsp->rsplen) ?
lsreq->rsplen : lsrsp->rsplen));
lsrsp->done(lsrsp);
- if (targetport) {
- tport = targetport->private;
- spin_lock(&tport->lock);
- list_add_tail(&tport->ls_list, &tls_req->ls_list);
- spin_unlock(&tport->lock);
- queue_work(nvmet_wq, &tport->ls_work);
- }
+ tport = targetport->private;
+ spin_lock(&tport->lock);
+ list_add_tail(&tport->ls_list, &tls_req->ls_list);
+ spin_unlock(&tport->lock);
+ queue_work(nvmet_wq, &tport->ls_work);
return 0;
}
@@ -1129,7 +1148,6 @@ static struct nvme_fc_port_template fctemplate = {
/* sizes of additional private data for data structures */
.local_priv_sz = sizeof(struct fcloop_lport_priv),
.remote_priv_sz = sizeof(struct fcloop_rport),
- .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
.fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
};
@@ -1152,7 +1170,6 @@ static struct nvmet_fc_target_template tgttemplate = {
.target_features = 0,
/* sizes of additional private data for data structures */
.target_priv_sz = sizeof(struct fcloop_tport),
- .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
};
static ssize_t
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* Re: [PATCH v3 10/18] nvmet-fcloop: allocate/free fcloop_lsreq directly
2025-03-18 10:40 ` [PATCH v3 10/18] nvmet-fcloop: allocate/free fcloop_lsreq directly Daniel Wagner
@ 2025-03-18 11:17 ` Hannes Reinecke
2025-03-18 13:58 ` Daniel Wagner
2025-03-19 22:47 ` James Smart
1 sibling, 1 reply; 62+ messages in thread
From: Hannes Reinecke @ 2025-03-18 11:17 UTC (permalink / raw)
To: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni
Cc: Keith Busch, linux-nvme, linux-kernel
On 3/18/25 11:40, Daniel Wagner wrote:
> fcloop depends on the host or the target to allocate the fcloop_lsreq
> object. This means that the lifetime of the fcloop_lsreq is tied to
> either the host or the target. Consequently, the host or the target must
> cooperate during shutdown.
>
> Unfortunately, this approach does not work well when the target forces a
> shutdown, as there are dependencies that are difficult to resolve in a
> clean way.
>
> The simplest solution is to decouple the lifetime of the fcloop_lsreq
> object by managing them directly within fcloop. Since this is not a
> performance-critical path and only a small number of LS objects are used
> during setup and cleanup, it does not significantly impact performance
> to allocate them during normal operation.
>
> Signed-off-by: Daniel Wagner <wagi@kernel.org>
> ---
> drivers/nvme/target/fcloop.c | 53 +++++++++++++++++++++++++++++---------------
> 1 file changed, 35 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target
fcloop.c> index
06f42da6a0335c53ae319133119d057aab12e07e..537fc6533a4cf5d39855cf850b82af739eeb3056
100644
> --- a/drivers/nvme/target/fcloop.c
> +++ b/drivers/nvme/target/fcloop.c
> @@ -342,6 +342,7 @@ fcloop_rport_lsrqst_work(struct work_struct *work)
> * callee may free memory containing tls_req.
> * do not reference lsreq after this.
> */
> + kfree(tls_req);
>
> spin_lock(&rport->lock);
> }
> @@ -353,10 +354,13 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
> struct nvme_fc_remote_port *remoteport,
> struct nvmefc_ls_req *lsreq)
> {
> - struct fcloop_lsreq *tls_req = lsreq->private;
> struct fcloop_rport *rport = remoteport->private;
> + struct fcloop_lsreq *tls_req;
> int ret = 0;
>
> + tls_req = kmalloc(sizeof(*tls_req), GFP_KERNEL);
> + if (!tls_req)
> + return -ENOMEM;
This cries out for kmem_cache_alloc() ...
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
^ permalink raw reply [flat|nested] 62+ messages in thread* Re: [PATCH v3 10/18] nvmet-fcloop: allocate/free fcloop_lsreq directly
2025-03-18 11:17 ` Hannes Reinecke
@ 2025-03-18 13:58 ` Daniel Wagner
2025-04-08 11:20 ` Daniel Wagner
0 siblings, 1 reply; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 13:58 UTC (permalink / raw)
To: Hannes Reinecke
Cc: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 12:17:11PM +0100, Hannes Reinecke wrote:
> > + tls_req = kmalloc(sizeof(*tls_req), GFP_KERNEL);
> > + if (!tls_req)
> > + return -ENOMEM;
>
> This cries out for kmem_cache_alloc() ...
Okay, will switch to this API. FWIW, in the same call path there are
more kmallocs.
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [PATCH v3 10/18] nvmet-fcloop: allocate/free fcloop_lsreq directly
2025-03-18 13:58 ` Daniel Wagner
@ 2025-04-08 11:20 ` Daniel Wagner
0 siblings, 0 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-04-08 11:20 UTC (permalink / raw)
To: Hannes Reinecke
Cc: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 02:58:35PM +0100, Daniel Wagner wrote:
> On Tue, Mar 18, 2025 at 12:17:11PM +0100, Hannes Reinecke wrote:
> > > + tls_req = kmalloc(sizeof(*tls_req), GFP_KERNEL);
> > > + if (!tls_req)
> > > + return -ENOMEM;
> >
> > This cries out for kmem_cache_alloc() ...
>
> Okay, will switch to this API. FWIW, in the same call path there are
> more kmallocs.
This change sent me down yet another rabbit hole because when
deallocating the cache, it reported a leak caused by a list_add_tail(x,
y) vs list_add_tail(y,x) bug which I missed the last time...
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [PATCH v3 10/18] nvmet-fcloop: allocate/free fcloop_lsreq directly
2025-03-18 10:40 ` [PATCH v3 10/18] nvmet-fcloop: allocate/free fcloop_lsreq directly Daniel Wagner
2025-03-18 11:17 ` Hannes Reinecke
@ 2025-03-19 22:47 ` James Smart
2025-04-04 12:53 ` Daniel Wagner
1 sibling, 1 reply; 62+ messages in thread
From: James Smart @ 2025-03-19 22:47 UTC (permalink / raw)
To: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel
On 3/18/2025 3:40 AM, Daniel Wagner wrote:
> fcloop depends on the host or the target to allocate the fcloop_lsreq
> object. This means that the lifetime of the fcloop_lsreq is tied to
> either the host or the target. Consequently, the host or the target must
> cooperate during shutdown.
>
> Unfortunately, this approach does not work well when the target forces a
> shutdown, as there are dependencies that are difficult to resolve in a
> clean way.
ok - although I'm guessing you'll be trading one set of problems for another.
>
> The simplest solution is to decouple the lifetime of the fcloop_lsreq
> object by managing them directly within fcloop. Since this is not a
> performance-critical path and only a small number of LS objects are used
> during setup and cleanup, it does not significantly impact performance
> to allocate them during normal operation.
ok
>
> Signed-off-by: Daniel Wagner <wagi@kernel.org>
> ---
> drivers/nvme/target/fcloop.c | 53 +++++++++++++++++++++++++++++---------------
> 1 file changed, 35 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
> index 06f42da6a0335c53ae319133119d057aab12e07e..537fc6533a4cf5d39855cf850b82af739eeb3056 100644
> --- a/drivers/nvme/target/fcloop.c
> +++ b/drivers/nvme/target/fcloop.c
> @@ -342,6 +342,7 @@ fcloop_rport_lsrqst_work(struct work_struct *work)
> * callee may free memory containing tls_req.
> * do not reference lsreq after this.
> */
> + kfree(tls_req);
>
> spin_lock(&rport->lock);
> }
> @@ -353,10 +354,13 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
> struct nvme_fc_remote_port *remoteport,
> struct nvmefc_ls_req *lsreq)
> {
> - struct fcloop_lsreq *tls_req = lsreq->private;
> struct fcloop_rport *rport = remoteport->private;
> + struct fcloop_lsreq *tls_req;
> int ret = 0;
>
> + tls_req = kmalloc(sizeof(*tls_req), GFP_KERNEL);
> + if (!tls_req)
> + return -ENOMEM;
> tls_req->lsreq = lsreq;
> INIT_LIST_HEAD(&tls_req->ls_list);
>
> @@ -387,19 +391,23 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
> struct nvme_fc_remote_port *remoteport = tport->remoteport;
> struct fcloop_rport *rport;
>
> +
> + if (!remoteport) {
> + kfree(tls_req);
> + return -ECONNREFUSED;
> + }
> +
don't do this - this is not a path the lldd would generate.
> memcpy(lsreq->rspaddr, lsrsp->rspbuf,
> ((lsreq->rsplen < lsrsp->rsplen) ?
> lsreq->rsplen : lsrsp->rsplen));
>
> lsrsp->done(lsrsp);
This done() call should always be made regardless of the remoteport
presence.
instead, put the check here
if (!remoteport) {
kfree(tls_req);
return 0;
}
>
> - if (remoteport) {
> - rport = remoteport->private;
> - spin_lock(&rport->lock);
> - list_add_tail(&tls_req->ls_list, &rport->ls_list);
> - spin_unlock(&rport->lock);
> - queue_work(nvmet_wq, &rport->ls_work);
> - }
> + rport = remoteport->private;
> + spin_lock(&rport->lock);
> + list_add_tail(&tls_req->ls_list, &rport->ls_list);
> + spin_unlock(&rport->lock);
> + queue_work(nvmet_wq, &rport->ls_work);
this is just an indentation style - whichever way works.
>
> return 0;
> }
> @@ -426,6 +434,7 @@ fcloop_tport_lsrqst_work(struct work_struct *work)
> * callee may free memory containing tls_req.
> * do not reference lsreq after this.
> */
> + kfree(tls_req);
>
> spin_lock(&tport->lock);
> }
> @@ -436,8 +445,8 @@ static int
> fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
> struct nvmefc_ls_req *lsreq)
> {
> - struct fcloop_lsreq *tls_req = lsreq->private;
> struct fcloop_tport *tport = targetport->private;
> + struct fcloop_lsreq *tls_req;
> int ret = 0;
>
> /*
> @@ -445,6 +454,10 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
> * hosthandle ignored as fcloop currently is
> * 1:1 tgtport vs remoteport
> */
> +
> + tls_req = kmalloc(sizeof(*tls_req), GFP_KERNEL);
> + if (!tls_req)
> + return -ENOMEM;
> tls_req->lsreq = lsreq;
> INIT_LIST_HEAD(&tls_req->ls_list);
>
> @@ -461,6 +474,9 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
> ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
> lsreq->rqstaddr, lsreq->rqstlen);
>
> + if (ret)
> + kfree(tls_req);
> +
> return ret;
> }
>
> @@ -475,18 +491,21 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
> struct nvmet_fc_target_port *targetport = rport->targetport;
> struct fcloop_tport *tport;
>
> + if (!targetport) {
> + kfree(tls_req);
> + return -ECONNREFUSED;
> + }
> +
same here - don't do this - this is not a path the lldd would generate.
> memcpy(lsreq->rspaddr, lsrsp->rspbuf,
> ((lsreq->rsplen < lsrsp->rsplen) ?
> lsreq->rsplen : lsrsp->rsplen));
> lsrsp->done(lsrsp);
>
Same for this done().
instead, put the check here
if (!targetport) {
kfree(tls_req);
return 0;
}
> - if (targetport) {
> - tport = targetport->private;
> - spin_lock(&tport->lock);
> - list_add_tail(&tport->ls_list, &tls_req->ls_list);
> - spin_unlock(&tport->lock);
> - queue_work(nvmet_wq, &tport->ls_work);
> - }
> + tport = targetport->private;
> + spin_lock(&tport->lock);
> + list_add_tail(&tport->ls_list, &tls_req->ls_list);
> + spin_unlock(&tport->lock);
> + queue_work(nvmet_wq, &tport->ls_work);
>
> return 0;
> }
> @@ -1129,7 +1148,6 @@ static struct nvme_fc_port_template fctemplate = {
> /* sizes of additional private data for data structures */
> .local_priv_sz = sizeof(struct fcloop_lport_priv),
> .remote_priv_sz = sizeof(struct fcloop_rport),
> - .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
> .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
> };
>
> @@ -1152,7 +1170,6 @@ static struct nvmet_fc_target_template tgttemplate = {
> .target_features = 0,
> /* sizes of additional private data for data structures */
> .target_priv_sz = sizeof(struct fcloop_tport),
> - .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
> };
>
> static ssize_t
>
-- james
^ permalink raw reply [flat|nested] 62+ messages in thread* Re: [PATCH v3 10/18] nvmet-fcloop: allocate/free fcloop_lsreq directly
2025-03-19 22:47 ` James Smart
@ 2025-04-04 12:53 ` Daniel Wagner
0 siblings, 0 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-04-04 12:53 UTC (permalink / raw)
To: James Smart
Cc: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni, Hannes Reinecke, Keith Busch, linux-nvme,
linux-kernel
Hi James,
On Wed, Mar 19, 2025 at 03:47:20PM -0700, James Smart wrote:
> On 3/18/2025 3:40 AM, Daniel Wagner wrote:
> > fcloop depends on the host or the target to allocate the fcloop_lsreq
> > object. This means that the lifetime of the fcloop_lsreq is tied to
> > either the host or the target. Consequently, the host or the target must
> > cooperate during shutdown.
> >
> > Unfortunately, this approach does not work well when the target forces a
> > shutdown, as there are dependencies that are difficult to resolve in a
> > clean way.
>
> ok - although I'm guessing you'll be trading one set of problems for
> another.
Yes, there is no free lunch :)
So far I was able to deal with new problems. I've run the updated series
over night with the nasty blktest case where the target is constantly
removed and added back without any problems.
> > @@ -353,10 +354,13 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
> > struct nvme_fc_remote_port *remoteport,
> > struct nvmefc_ls_req *lsreq)
> > {
> > - struct fcloop_lsreq *tls_req = lsreq->private;
> > struct fcloop_rport *rport = remoteport->private;
> > + struct fcloop_lsreq *tls_req;
> > int ret = 0;
> > + tls_req = kmalloc(sizeof(*tls_req), GFP_KERNEL);
> > + if (!tls_req)
> > + return -ENOMEM;
> > tls_req->lsreq = lsreq;
> > INIT_LIST_HEAD(&tls_req->ls_list);
> > @@ -387,19 +391,23 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
> > struct nvme_fc_remote_port *remoteport = tport->remoteport;
> > struct fcloop_rport *rport;
> > +
> > + if (!remoteport) {
> > + kfree(tls_req);
> > + return -ECONNREFUSED;
> > + }
> > +
> don't do this - this is not a path the lldd would generate.
Ok, after looking at it again, I think I don't need it.
>
> > memcpy(lsreq->rspaddr, lsrsp->rspbuf,
> > ((lsreq->rsplen < lsrsp->rsplen) ?
> > lsreq->rsplen : lsrsp->rsplen));
> > lsrsp->done(lsrsp);
>
> This done() call should always be made regardless of the remoteport
> presence.
>
> instead, put the check here
>
> if (!remoteport) {
> kfree(tls_req);
> return 0;
> }
Will do
> > @@ -475,18 +491,21 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
> > struct nvmet_fc_target_port *targetport = rport->targetport;
> > struct fcloop_tport *tport;
> > + if (!targetport) {
> > + kfree(tls_req);
> > + return -ECONNREFUSED;
> > + }
> > +
>
> same here - don't do this - this is not a path the lldd would
> generate.
The early return and freeing tls_req is necessary. If the host doesn't
expect the error code, then fine. I'll remove it. The reason why we need
it is:
The target port is gone. The target doesn't expect any response
anymore and the ->done call is not valid because the resources have
been freed by nvmet_fc_free_pending_reqs.
We end up here from delete association exchange:
nvmet_fc_xmt_disconnect_assoc sends a async request.
I am adding this as comment here.
Thanks for the review. Highly appreciated!
Daniel
^ permalink raw reply [flat|nested] 62+ messages in thread
* [PATCH v3 11/18] nvmet-fc: inline nvmet_fc_delete_assoc
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
` (9 preceding siblings ...)
2025-03-18 10:40 ` [PATCH v3 10/18] nvmet-fcloop: allocate/free fcloop_lsreq directly Daniel Wagner
@ 2025-03-18 10:40 ` Daniel Wagner
2025-03-21 6:14 ` Christoph Hellwig
2025-03-18 10:40 ` [PATCH v3 12/18] nvmet-fc: inline nvmet_fc_free_hostport Daniel Wagner
` (6 subsequent siblings)
17 siblings, 1 reply; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:40 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
No need for this tiny helper with only one user, just inline it.
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/target/fc.c | 10 ++--------
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 8edb2b7f7a70d0a0a366f59f7db7af371e3799a0..05290506602469fc98d8463d0f1fcb6cf2422fde 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1087,13 +1087,6 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
return newhost;
}
-static void
-nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
-{
- nvmet_fc_delete_target_assoc(assoc);
- nvmet_fc_tgt_a_put(assoc);
-}
-
static void
nvmet_fc_delete_assoc_work(struct work_struct *work)
{
@@ -1101,7 +1094,8 @@ nvmet_fc_delete_assoc_work(struct work_struct *work)
container_of(work, struct nvmet_fc_tgt_assoc, del_work);
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
- nvmet_fc_delete_assoc(assoc);
+ nvmet_fc_delete_target_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
nvmet_fc_tgtport_put(tgtport);
}
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* [PATCH v3 12/18] nvmet-fc: inline nvmet_fc_free_hostport
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
` (10 preceding siblings ...)
2025-03-18 10:40 ` [PATCH v3 11/18] nvmet-fc: inline nvmet_fc_delete_assoc Daniel Wagner
@ 2025-03-18 10:40 ` Daniel Wagner
2025-03-21 6:15 ` Christoph Hellwig
2025-03-18 10:40 ` [PATCH v3 13/18] nvmet-fc: update tgtport ref per assoc Daniel Wagner
` (5 subsequent siblings)
17 siblings, 1 reply; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:40 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
No need for this tiny helper with only one user, let's inline it.
And since the hostport ref counter needs to stay in sync, it's not
optional anymore to give back the reference.
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/target/fc.c | 22 ++++++----------------
1 file changed, 6 insertions(+), 16 deletions(-)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 05290506602469fc98d8463d0f1fcb6cf2422fde..20ea3acd7a5ff6f7c27b55b8c00f22b2a0768b7b 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1007,16 +1007,6 @@ nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
return kref_get_unless_zero(&hostport->ref);
}
-static void
-nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
-{
- /* if LLDD not implemented, leave as NULL */
- if (!hostport || !hostport->hosthandle)
- return;
-
- nvmet_fc_hostport_put(hostport);
-}
-
static struct nvmet_fc_hostport *
nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
@@ -1196,7 +1186,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
/* Send Disconnect now that all i/o has completed */
nvmet_fc_xmt_disconnect_assoc(assoc);
- nvmet_fc_free_hostport(assoc->hostport);
+ nvmet_fc_hostport_put(assoc->hostport);
spin_lock_irqsave(&tgtport->lock, flags);
oldls = assoc->rcv_disconn;
spin_unlock_irqrestore(&tgtport->lock, flags);
@@ -1459,11 +1449,6 @@ nvmet_fc_free_tgtport(struct kref *ref)
struct nvmet_fc_tgtport *tgtport =
container_of(ref, struct nvmet_fc_tgtport, ref);
struct device *dev = tgtport->dev;
- unsigned long flags;
-
- spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
- list_del(&tgtport->tgt_list);
- spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
nvmet_fc_free_ls_iodlist(tgtport);
@@ -1624,6 +1609,11 @@ int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_del(&tgtport->tgt_list);
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
nvmet_fc_portentry_unbind_tgt(tgtport);
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* Re: [PATCH v3 12/18] nvmet-fc: inline nvmet_fc_free_hostport
2025-03-18 10:40 ` [PATCH v3 12/18] nvmet-fc: inline nvmet_fc_free_hostport Daniel Wagner
@ 2025-03-21 6:15 ` Christoph Hellwig
0 siblings, 0 replies; 62+ messages in thread
From: Christoph Hellwig @ 2025-03-21 6:15 UTC (permalink / raw)
To: Daniel Wagner
Cc: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 11:40:06AM +0100, Daniel Wagner wrote:
> No need for this tiny helper with only one user, let's inline it.
>
> And since the hostport ref counter needs to stay in sync, it's not
> optional anymore to give back the reference.
Looks good:
Reviewed-by: Christoph Hellwig <hch@lst.de>
Btw, given that the series keeps growing and needs at least another
iteration maybe just send out the trivial cleanups and fully reviewed
simply refcounting changes ASAP to make it easier to review?
^ permalink raw reply [flat|nested] 62+ messages in thread
* [PATCH v3 13/18] nvmet-fc: update tgtport ref per assoc
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
` (11 preceding siblings ...)
2025-03-18 10:40 ` [PATCH v3 12/18] nvmet-fc: inline nvmet_fc_free_hostport Daniel Wagner
@ 2025-03-18 10:40 ` Daniel Wagner
2025-03-21 6:15 ` Christoph Hellwig
2025-03-18 10:40 ` [PATCH v3 14/18] nvmet-fc: take tgtport reference only once Daniel Wagner
` (4 subsequent siblings)
17 siblings, 1 reply; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:40 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
We need to take a reference for each unique association;
nvmet_fc_alloc_hostport takes one for each newly created association.
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/target/fc.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 20ea3acd7a5ff6f7c27b55b8c00f22b2a0768b7b..8b14947906948c8b4914932837b4ec90921b419d 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1139,6 +1139,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
goto out_ida;
assoc->tgtport = tgtport;
+ nvmet_fc_tgtport_get(tgtport);
assoc->a_id = idx;
INIT_LIST_HEAD(&assoc->a_list);
kref_init(&assoc->ref);
@@ -1238,6 +1239,8 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
pr_info("{%d:%d}: Association deleted\n",
tgtport->fc_target_port.port_num, assoc->a_id);
+
+ nvmet_fc_tgtport_put(tgtport);
}
static struct nvmet_fc_tgt_assoc *
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* Re: [PATCH v3 13/18] nvmet-fc: update tgtport ref per assoc
2025-03-18 10:40 ` [PATCH v3 13/18] nvmet-fc: update tgtport ref per assoc Daniel Wagner
@ 2025-03-21 6:15 ` Christoph Hellwig
0 siblings, 0 replies; 62+ messages in thread
From: Christoph Hellwig @ 2025-03-21 6:15 UTC (permalink / raw)
To: Daniel Wagner
Cc: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 11:40:07AM +0100, Daniel Wagner wrote:
> We need to take for each unique association a reference.
> nvmet_fc_alloc_hostport for each newly created association.
Looks good:
Reviewed-by: Christoph Hellwig <hch@lst.de>
^ permalink raw reply [flat|nested] 62+ messages in thread
* [PATCH v3 14/18] nvmet-fc: take tgtport reference only once
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
` (12 preceding siblings ...)
2025-03-18 10:40 ` [PATCH v3 13/18] nvmet-fc: update tgtport ref per assoc Daniel Wagner
@ 2025-03-18 10:40 ` Daniel Wagner
2025-03-18 11:18 ` Hannes Reinecke
2025-03-21 6:17 ` Christoph Hellwig
2025-03-18 10:40 ` [PATCH v3 15/18] nvmet-fc: free pending reqs on tgtport unregister Daniel Wagner
` (3 subsequent siblings)
17 siblings, 2 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:40 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
The reference counting code can be simplified. Instead of taking a tgtport
reference at the beginning of nvmet_fc_alloc_hostport and putting it back
if no new hostport object is allocated, only take it when a new
hostport object is allocated.
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/target/fc.c | 24 +++++++++---------------
1 file changed, 9 insertions(+), 15 deletions(-)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 8b14947906948c8b4914932837b4ec90921b419d..b2f5934209f9952679dc1235fb7c927818930688 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1030,33 +1030,26 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
struct nvmet_fc_hostport *newhost, *match = NULL;
unsigned long flags;
+ /*
+ * A ref on tgtport is being held while executing this function,
+ * thus there is no need to take first one and give it back on
+ * exit.
+ */
+
/* if LLDD not implemented, leave as NULL */
if (!hosthandle)
return NULL;
- /*
- * take reference for what will be the newly allocated hostport if
- * we end up using a new allocation
- */
- if (!nvmet_fc_tgtport_get(tgtport))
- return ERR_PTR(-EINVAL);
-
spin_lock_irqsave(&tgtport->lock, flags);
match = nvmet_fc_match_hostport(tgtport, hosthandle);
spin_unlock_irqrestore(&tgtport->lock, flags);
- if (match) {
- /* no new allocation - release reference */
- nvmet_fc_tgtport_put(tgtport);
+ if (match)
return match;
- }
newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
- if (!newhost) {
- /* no new allocation - release reference */
- nvmet_fc_tgtport_put(tgtport);
+ if (!newhost)
return ERR_PTR(-ENOMEM);
- }
spin_lock_irqsave(&tgtport->lock, flags);
match = nvmet_fc_match_hostport(tgtport, hosthandle);
@@ -1065,6 +1058,7 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
kfree(newhost);
newhost = match;
} else {
+ nvmet_fc_tgtport_get(tgtport);
newhost->tgtport = tgtport;
newhost->hosthandle = hosthandle;
INIT_LIST_HEAD(&newhost->host_list);
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* Re: [PATCH v3 14/18] nvmet-fc: take tgtport reference only once
2025-03-18 10:40 ` [PATCH v3 14/18] nvmet-fc: take tgtport reference only once Daniel Wagner
@ 2025-03-18 11:18 ` Hannes Reinecke
2025-03-21 6:17 ` Christoph Hellwig
1 sibling, 0 replies; 62+ messages in thread
From: Hannes Reinecke @ 2025-03-18 11:18 UTC (permalink / raw)
To: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni
Cc: Keith Busch, linux-nvme, linux-kernel
On 3/18/25 11:40, Daniel Wagner wrote:
> The reference counting code can be simplified. Instead taking a tgtport
> refrerence at the beginning of nvmet_fc_alloc_hostport and put it back
> if not a new hostport object is allocated, only take it when a new
> hostport object is allocated.
>
> Signed-off-by: Daniel Wagner <wagi@kernel.org>
> ---
> drivers/nvme/target/fc.c | 24 +++++++++---------------
> 1 file changed, 9 insertions(+), 15 deletions(-)
>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
^ permalink raw reply [flat|nested] 62+ messages in thread
* Re: [PATCH v3 14/18] nvmet-fc: take tgtport reference only once
2025-03-18 10:40 ` [PATCH v3 14/18] nvmet-fc: take tgtport reference only once Daniel Wagner
2025-03-18 11:18 ` Hannes Reinecke
@ 2025-03-21 6:17 ` Christoph Hellwig
1 sibling, 0 replies; 62+ messages in thread
From: Christoph Hellwig @ 2025-03-21 6:17 UTC (permalink / raw)
To: Daniel Wagner
Cc: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 11:40:08AM +0100, Daniel Wagner wrote:
> The reference counting code can be simplified. Instead taking a tgtport
> refrerence at the beginning of nvmet_fc_alloc_hostport and put it back
> if not a new hostport object is allocated, only take it when a new
> hostport object is allocated.
>
> Signed-off-by: Daniel Wagner <wagi@kernel.org>
> ---
> drivers/nvme/target/fc.c | 24 +++++++++---------------
> 1 file changed, 9 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
> index 8b14947906948c8b4914932837b4ec90921b419d..b2f5934209f9952679dc1235fb7c927818930688 100644
> --- a/drivers/nvme/target/fc.c
> +++ b/drivers/nvme/target/fc.c
> @@ -1030,33 +1030,26 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
> struct nvmet_fc_hostport *newhost, *match = NULL;
> unsigned long flags;
>
> + /*
> + * A ref on tgtport is being held while executing this function,
> + * thus there is no need to take first one and give it back on
> + * exit.
> + */
I'd move this above the function and shorten it to:
/*
* Caller holds a reference on tgtport.
*/
Otherwise looks good:
Reviewed-by: Christoph Hellwig <hch@lst.de>
^ permalink raw reply [flat|nested] 62+ messages in thread
* [PATCH v3 15/18] nvmet-fc: free pending reqs on tgtport unregister
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
` (13 preceding siblings ...)
2025-03-18 10:40 ` [PATCH v3 14/18] nvmet-fc: take tgtport reference only once Daniel Wagner
@ 2025-03-18 10:40 ` Daniel Wagner
2025-03-21 6:19 ` Christoph Hellwig
2025-03-18 10:40 ` [PATCH v3 16/18] nvmet-fc: take tgtport refs for portentry Daniel Wagner
` (2 subsequent siblings)
17 siblings, 1 reply; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:40 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
When nvmet_fc_unregister_targetport is called by the LLDD, it's not
possible to communicate with the host, thus all pending requests will
not be processed. Thus explicitly free them.
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Daniel Wagner <wagi@kernel.org>
nvmet-fc: merge with f200af94ac9d ("nvmet-fc: free pending reqs on tgtport unregister")
---
drivers/nvme/target/fc.c | 46 +++++++++++++++++++++++++++++++++++++++-------
1 file changed, 39 insertions(+), 7 deletions(-)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index b2f5934209f9952679dc1235fb7c927818930688..d10ddcb57c1b09d871152f0d9a48f93ec6dc8685 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1591,6 +1591,44 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
+static void
+nvmet_fc_free_pending_reqs(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_ls_req_op *lsop;
+ struct nvmefc_ls_req *lsreq;
+ struct nvmet_fc_ls_iod *iod;
+ int i;
+
+ iod = tgtport->iod;
+ for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++)
+ cancel_work(&iod->work);
+
+ /*
+ * After this point the connection is lost and thus any pending
+ * request can't be processed by the normal completion path. This
+ * is likely a request from nvmet_fc_send_ls_req_async.
+ */
+ for (;;) {
+ lsop = list_first_entry_or_null(&tgtport->ls_req_list,
+ struct nvmet_fc_ls_req_op,
+ lsreq_list);
+ if (!lsop)
+ break;
+
+ list_del(&lsop->lsreq_list);
+
+ if (!lsop->req_queued)
+ continue;
+
+ lsreq = &lsop->ls_req;
+ fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
+ (lsreq->rqstlen + lsreq->rsplen),
+ DMA_BIDIRECTIONAL);
+ nvmet_fc_tgtport_put(tgtport);
+ kfree(lsop);
+ }
+}
+
/**
* nvmet_fc_unregister_targetport - transport entry point called by an
* LLDD to deregister/remove a previously
@@ -1619,13 +1657,7 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
flush_workqueue(nvmet_wq);
- /*
- * should terminate LS's as well. However, LS's will be generated
- * at the tail end of association termination, so they likely don't
- * exist yet. And even if they did, it's worthwhile to just let
- * them finish and targetport ref counting will clean things up.
- */
-
+ nvmet_fc_free_pending_reqs(tgtport);
nvmet_fc_tgtport_put(tgtport);
return 0;
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* Re: [PATCH v3 15/18] nvmet-fc: free pending reqs on tgtport unregister
2025-03-18 10:40 ` [PATCH v3 15/18] nvmet-fc: free pending reqs on tgtport unregister Daniel Wagner
@ 2025-03-21 6:19 ` Christoph Hellwig
2025-04-08 11:29 ` Daniel Wagner
0 siblings, 1 reply; 62+ messages in thread
From: Christoph Hellwig @ 2025-03-21 6:19 UTC (permalink / raw)
To: Daniel Wagner
Cc: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 11:40:09AM +0100, Daniel Wagner wrote:
> nvmet-fc: merge with f200af94ac9d ("nvmet-fc: free pending reqs on tgtport unregister")
What is this supposed to mean?
> + for (;;) {
> + lsop = list_first_entry_or_null(&tgtport->ls_req_list,
> + struct nvmet_fc_ls_req_op,
> + lsreq_list);
> + if (!lsop)
> + break;
while ((lsop = list_first_entry_or_null(&tgtport->ls_req_list,
struct nvmet_fc_ls_req_op, lsreq_list))) {
> +
> + list_del(&lsop->lsreq_list);
Another case where I'd really love to help the list_pop helper Linus
flamed me for :(
^ permalink raw reply [flat|nested] 62+ messages in thread* Re: [PATCH v3 15/18] nvmet-fc: free pending reqs on tgtport unregister
2025-03-21 6:19 ` Christoph Hellwig
@ 2025-04-08 11:29 ` Daniel Wagner
0 siblings, 0 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-04-08 11:29 UTC (permalink / raw)
To: Christoph Hellwig
Cc: Daniel Wagner, James Smart, Sagi Grimberg, Chaitanya Kulkarni,
Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel
On Fri, Mar 21, 2025 at 07:19:03AM +0100, Christoph Hellwig wrote:
> On Tue, Mar 18, 2025 at 11:40:09AM +0100, Daniel Wagner wrote:
> > nvmet-fc: merge with f200af94ac9d ("nvmet-fc: free pending reqs on tgtport unregister")
>
> What is this supposed to mean?
This a left over from my dev work.
> > + for (;;) {
> > + lsop = list_first_entry_or_null(&tgtport->ls_req_list,
> > + struct nvmet_fc_ls_req_op,
> > + lsreq_list);
> > + if (!lsop)
> > + break;
>
> while ((lsop = list_first_entry_or_null(&tgtport->ls_req_list,
> struct nvmet_fc_ls_req_op, lsreq_list))) {
>
> > +
> > + list_del(&lsop->lsreq_list);
>
> Another case where I'd really love to help the list_pop helper Linus
> flamed me for :(
Sure, will change these for(;;) loops into while() loops.
BTW, bcachefs seems to have it:
#define container_of_or_null(ptr, type, member) \
({ \
typeof(ptr) _ptr = ptr; \
_ptr ? container_of(_ptr, type, member) : NULL; \
})
static inline struct list_head *list_pop(struct list_head *head)
{
if (list_empty(head))
return NULL;
struct list_head *ret = head->next;
list_del_init(ret);
return ret;
}
#define list_pop_entry(head, type, member) \
container_of_or_null(list_pop(head), type, member)
^ permalink raw reply [flat|nested] 62+ messages in thread
* [PATCH v3 16/18] nvmet-fc: take tgtport refs for portentry
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
` (14 preceding siblings ...)
2025-03-18 10:40 ` [PATCH v3 15/18] nvmet-fc: free pending reqs on tgtport unregister Daniel Wagner
@ 2025-03-18 10:40 ` Daniel Wagner
2025-03-18 11:19 ` Hannes Reinecke
2025-03-18 10:40 ` [PATCH v3 17/18] nvmet-fc: put ref when assoc->del_work is already scheduled Daniel Wagner
2025-03-18 10:40 ` [PATCH v3 18/18] nvme-fc: do not reference lsrsp after failure Daniel Wagner
17 siblings, 1 reply; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:40 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
Ensure that the tgtport is not going away as long as the portentry has a
pointer to it.
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/target/fc.c | 45 +++++++++++++++++++++++++++++++++++++++------
1 file changed, 39 insertions(+), 6 deletions(-)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index d10ddcb57c1b09d871152f0d9a48f93ec6dc8685..649afce908bbade0a843efc4b8b76105c164b035 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1265,6 +1265,7 @@ nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
{
lockdep_assert_held(&nvmet_fc_tgtlock);
+ nvmet_fc_tgtport_get(tgtport);
pe->tgtport = tgtport;
tgtport->pe = pe;
@@ -1284,8 +1285,10 @@ nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
unsigned long flags;
spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
- if (pe->tgtport)
+ if (pe->tgtport) {
+ nvmet_fc_tgtport_put(pe->tgtport);
pe->tgtport->pe = NULL;
+ }
list_del(&pe->pe_list);
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
@@ -1303,8 +1306,10 @@ nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
pe = tgtport->pe;
- if (pe)
+ if (pe) {
+ nvmet_fc_tgtport_put(pe->tgtport);
pe->tgtport = NULL;
+ }
tgtport->pe = NULL;
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
@@ -1327,6 +1332,9 @@ nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
if (tgtport->fc_target_port.node_name == pe->node_name &&
tgtport->fc_target_port.port_name == pe->port_name) {
+ if (!nvmet_fc_tgtport_get(tgtport))
+ continue;
+
WARN_ON(pe->tgtport);
tgtport->pe = pe;
pe->tgtport = tgtport;
@@ -1664,7 +1672,6 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
-
/* ********************** FC-NVME LS RCV Handling ************************* */
@@ -2901,12 +2908,17 @@ nvmet_fc_add_port(struct nvmet_port *port)
list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
if ((tgtport->fc_target_port.node_name == traddr.nn) &&
(tgtport->fc_target_port.port_name == traddr.pn)) {
+ if (!nvmet_fc_tgtport_get(tgtport))
+ continue;
+
/* a FC port can only be 1 nvmet port id */
if (!tgtport->pe) {
nvmet_fc_portentry_bind(tgtport, pe, port);
ret = 0;
} else
ret = -EALREADY;
+
+ nvmet_fc_tgtport_put(tgtport);
break;
}
}
@@ -2922,11 +2934,21 @@ static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
struct nvmet_fc_port_entry *pe = port->priv;
+ struct nvmet_fc_tgtport *tgtport = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport))
+ tgtport = pe->tgtport;
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
nvmet_fc_portentry_unbind(pe);
- /* terminate any outstanding associations */
- __nvmet_fc_free_assocs(pe->tgtport);
+ if (tgtport) {
+ /* terminate any outstanding associations */
+ __nvmet_fc_free_assocs(tgtport);
+ nvmet_fc_tgtport_put(tgtport);
+ }
kfree(pe);
}
@@ -2935,10 +2957,21 @@ static void
nvmet_fc_discovery_chg(struct nvmet_port *port)
{
struct nvmet_fc_port_entry *pe = port->priv;
- struct nvmet_fc_tgtport *tgtport = pe->tgtport;
+ struct nvmet_fc_tgtport *tgtport = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport))
+ tgtport = pe->tgtport;
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+ if (!tgtport)
+ return;
if (tgtport && tgtport->ops->discovery_event)
tgtport->ops->discovery_event(&tgtport->fc_target_port);
+
+ nvmet_fc_tgtport_put(tgtport);
}
static ssize_t
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* Re: [PATCH v3 16/18] nvmet-fc: take tgtport refs for portentry
2025-03-18 10:40 ` [PATCH v3 16/18] nvmet-fc: take tgtport refs for portentry Daniel Wagner
@ 2025-03-18 11:19 ` Hannes Reinecke
0 siblings, 0 replies; 62+ messages in thread
From: Hannes Reinecke @ 2025-03-18 11:19 UTC (permalink / raw)
To: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni
Cc: Keith Busch, linux-nvme, linux-kernel
On 3/18/25 11:40, Daniel Wagner wrote:
> Ensure that the tgtport is not going away as long portentry has a
> pointer on it.
>
> Signed-off-by: Daniel Wagner <wagi@kernel.org>
> ---
> drivers/nvme/target/fc.c | 45 +++++++++++++++++++++++++++++++++++++++------
> 1 file changed, 39 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
> index d10ddcb57c1b09d871152f0d9a48f93ec6dc8685..649afce908bbade0a843efc4b8b76105c164b035 100644
> --- a/drivers/nvme/target/fc.c
> +++ b/drivers/nvme/target/fc.c
> @@ -1265,6 +1265,7 @@ nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
> {
> lockdep_assert_held(&nvmet_fc_tgtlock);
>
> + nvmet_fc_tgtport_get(tgtport);
> pe->tgtport = tgtport;
> tgtport->pe = pe;
>
> @@ -1284,8 +1285,10 @@ nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
> unsigned long flags;
>
> spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
> - if (pe->tgtport)
> + if (pe->tgtport) {
> + nvmet_fc_tgtport_put(pe->tgtport);
> pe->tgtport->pe = NULL;
> + }
> list_del(&pe->pe_list);
> spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
> }
> @@ -1303,8 +1306,10 @@ nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
>
> spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
> pe = tgtport->pe;
> - if (pe)
> + if (pe) {
> + nvmet_fc_tgtport_put(pe->tgtport);
> pe->tgtport = NULL;
> + }
> tgtport->pe = NULL;
> spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
> }
> @@ -1327,6 +1332,9 @@ nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
> list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
> if (tgtport->fc_target_port.node_name == pe->node_name &&
> tgtport->fc_target_port.port_name == pe->port_name) {
> + if (!nvmet_fc_tgtport_get(tgtport))
> + continue;
> +
> WARN_ON(pe->tgtport);
> tgtport->pe = pe;
> pe->tgtport = tgtport;
> @@ -1664,7 +1672,6 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
> }
> EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
>
> -
> /* ********************** FC-NVME LS RCV Handling ************************* */
>
>
Empty line ...
> @@ -2901,12 +2908,17 @@ nvmet_fc_add_port(struct nvmet_port *port)
> list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
> if ((tgtport->fc_target_port.node_name == traddr.nn) &&
> (tgtport->fc_target_port.port_name == traddr.pn)) {
> + if (!nvmet_fc_tgtport_get(tgtport))
> + continue;
> +
> /* a FC port can only be 1 nvmet port id */
> if (!tgtport->pe) {
> nvmet_fc_portentry_bind(tgtport, pe, port);
> ret = 0;
> } else
> ret = -EALREADY;
> +
> + nvmet_fc_tgtport_put(tgtport);
> break;
> }
> }
> @@ -2922,11 +2934,21 @@ static void
> nvmet_fc_remove_port(struct nvmet_port *port)
> {
> struct nvmet_fc_port_entry *pe = port->priv;
> + struct nvmet_fc_tgtport *tgtport = NULL;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
> + if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport))
> + tgtport = pe->tgtport;
> + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
>
> nvmet_fc_portentry_unbind(pe);
>
> - /* terminate any outstanding associations */
> - __nvmet_fc_free_assocs(pe->tgtport);
> + if (tgtport) {
> + /* terminate any outstanding associations */
> + __nvmet_fc_free_assocs(tgtport);
> + nvmet_fc_tgtport_put(tgtport);
> + }
>
> kfree(pe);
> }
> @@ -2935,10 +2957,21 @@ static void
> nvmet_fc_discovery_chg(struct nvmet_port *port)
> {
> struct nvmet_fc_port_entry *pe = port->priv;
> - struct nvmet_fc_tgtport *tgtport = pe->tgtport;
> + struct nvmet_fc_tgtport *tgtport = NULL;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
> + if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport))
> + tgtport = pe->tgtport;
> + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
> +
> + if (!tgtport)
> + return;
>
> if (tgtport && tgtport->ops->discovery_event)
> tgtport->ops->discovery_event(&tgtport->fc_target_port);
> +
> + nvmet_fc_tgtport_put(tgtport);
> }
>
> static ssize_t
>
Otherwise:
Reviewed-by: Hannes Reinecke <hare@suse.de>
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
^ permalink raw reply [flat|nested] 62+ messages in thread
* [PATCH v3 17/18] nvmet-fc: put ref when assoc->del_work is already scheduled
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
` (15 preceding siblings ...)
2025-03-18 10:40 ` [PATCH v3 16/18] nvmet-fc: take tgtport refs for portentry Daniel Wagner
@ 2025-03-18 10:40 ` Daniel Wagner
2025-03-18 11:20 ` Hannes Reinecke
2025-03-21 6:19 ` Christoph Hellwig
2025-03-18 10:40 ` [PATCH v3 18/18] nvme-fc: do not reference lsrsp after failure Daniel Wagner
17 siblings, 2 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:40 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
Do not leak the tgtport reference when the work is already scheduled.
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/target/fc.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 649afce908bbade0a843efc4b8b76105c164b035..e027986e35098acbe5f97dcbcc845b9d46b88923 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1087,7 +1087,8 @@ static void
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
nvmet_fc_tgtport_get(assoc->tgtport);
- queue_work(nvmet_wq, &assoc->del_work);
+ if (!queue_work(nvmet_wq, &assoc->del_work))
+ nvmet_fc_tgtport_put(assoc->tgtport);
}
static bool
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* Re: [PATCH v3 17/18] nvmet-fc: put ref when assoc->del_work is already scheduled
2025-03-18 10:40 ` [PATCH v3 17/18] nvmet-fc: put ref when assoc->del_work is already scheduled Daniel Wagner
@ 2025-03-18 11:20 ` Hannes Reinecke
2025-03-21 6:19 ` Christoph Hellwig
1 sibling, 0 replies; 62+ messages in thread
From: Hannes Reinecke @ 2025-03-18 11:20 UTC (permalink / raw)
To: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni
Cc: Keith Busch, linux-nvme, linux-kernel
On 3/18/25 11:40, Daniel Wagner wrote:
> Do not leak the tgtport reference when the work is already scheduled.
>
> Signed-off-by: Daniel Wagner <wagi@kernel.org>
> ---
> drivers/nvme/target/fc.c | 3 ++-
> 1 file changed, 2 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
> index 649afce908bbade0a843efc4b8b76105c164b035..e027986e35098acbe5f97dcbcc845b9d46b88923 100644
> --- a/drivers/nvme/target/fc.c
> +++ b/drivers/nvme/target/fc.c
> @@ -1087,7 +1087,8 @@ static void
> nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
> {
> nvmet_fc_tgtport_get(assoc->tgtport);
> - queue_work(nvmet_wq, &assoc->del_work);
> + if (!queue_work(nvmet_wq, &assoc->del_work))
> + nvmet_fc_tgtport_put(assoc->tgtport);
> }
>
> static bool
>
Reviewed-by: Hannes Reinecke <hare@suse.de>
cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
^ permalink raw reply [flat|nested] 62+ messages in thread* Re: [PATCH v3 17/18] nvmet-fc: put ref when assoc->del_work is already scheduled
2025-03-18 10:40 ` [PATCH v3 17/18] nvmet-fc: put ref when assoc->del_work is already scheduled Daniel Wagner
2025-03-18 11:20 ` Hannes Reinecke
@ 2025-03-21 6:19 ` Christoph Hellwig
1 sibling, 0 replies; 62+ messages in thread
From: Christoph Hellwig @ 2025-03-21 6:19 UTC (permalink / raw)
To: Daniel Wagner
Cc: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni,
Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 11:40:11AM +0100, Daniel Wagner wrote:
> Do not leak the tgtport reference when the work is already scheduled.
Looks good:
Reviewed-by: Christoph Hellwig <hch@lst.de>
^ permalink raw reply [flat|nested] 62+ messages in thread
* [PATCH v3 18/18] nvme-fc: do not reference lsrsp after failure
2025-03-18 10:39 [PATCH v3 00/18] nvmet-fcloop: track resources via reference counting Daniel Wagner
` (16 preceding siblings ...)
2025-03-18 10:40 ` [PATCH v3 17/18] nvmet-fc: put ref when assoc->del_work is already scheduled Daniel Wagner
@ 2025-03-18 10:40 ` Daniel Wagner
2025-03-18 11:33 ` Hannes Reinecke
17 siblings, 1 reply; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 10:40 UTC (permalink / raw)
To: James Smart, Christoph Hellwig, Sagi Grimberg, Chaitanya Kulkarni
Cc: Hannes Reinecke, Keith Busch, linux-nvme, linux-kernel,
Daniel Wagner
The lsrsp object is maintained by the LLDD. The lifetime of the lsrsp
object is implicit. Because there is no explicit cleanup/free call into
the LLDD, it is not safe to assume, after xmt_ls_rsp fails, that the
lsrsp is still valid. The LLDD could have freed the object already.
With the recent changes to how fcloop tracks the resources, this is the
case. Thus don't access the lsrsp after xmt_ls_rsp fails.
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
drivers/nvme/host/fc.c | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b9929a5a7f4e3f3a03953379aceb90f0c1a0b561..2c32ba9ee688d7a683bbbf8fc57a5f9b32b2ab8d 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1410,9 +1410,8 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
}
static void
-nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
+nvme_fc_xmt_ls_rsp_free(struct nvmefc_ls_rcv_op *lsop)
{
- struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
struct nvme_fc_rport *rport = lsop->rport;
struct nvme_fc_lport *lport = rport->lport;
unsigned long flags;
@@ -1433,6 +1432,14 @@ nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
nvme_fc_rport_put(rport);
}
+static void
+nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
+{
+ struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
+
+ nvme_fc_xmt_ls_rsp_free(lsop);
+}
+
static void
nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
{
@@ -1450,7 +1457,7 @@ nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
dev_warn(lport->dev,
"LLDD rejected LS RSP xmt: LS %d status %d\n",
w0->ls_cmd, ret);
- nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
+ nvme_fc_xmt_ls_rsp_free(lsop);
return;
}
}
--
2.48.1
^ permalink raw reply related [flat|nested] 62+ messages in thread* Re: [PATCH v3 18/18] nvme-fc: do not reference lsrsp after failure
2025-03-18 10:40 ` [PATCH v3 18/18] nvme-fc: do not reference lsrsp after failure Daniel Wagner
@ 2025-03-18 11:33 ` Hannes Reinecke
2025-03-18 14:01 ` Daniel Wagner
0 siblings, 1 reply; 62+ messages in thread
From: Hannes Reinecke @ 2025-03-18 11:33 UTC (permalink / raw)
To: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni
Cc: Keith Busch, linux-nvme, linux-kernel
On 3/18/25 11:40, Daniel Wagner wrote:
> The lsrsp object is maintained by the LLDD. The lifetime of the lsrsp
> object is implicit. Because there is no explicit cleanup/free call into
> the LLDD, it is not safe to assume after xml_rsp_fails, that the lsrsp
> is still valid. The LLDD could have freed the object already.
>
> With the recent changes how fcloop tracks the resources, this is the
> case. Thus don't access lsrsp after xml_rsp_fails.
>
> Signed-off-by: Daniel Wagner <wagi@kernel.org>
> ---
> drivers/nvme/host/fc.c | 13 ++++++++++---
> 1 file changed, 10 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
> index b9929a5a7f4e3f3a03953379aceb90f0c1a0b561..2c32ba9ee688d7a683bbbf8fc57a5f9b32b2ab8d 100644
> --- a/drivers/nvme/host/fc.c
> +++ b/drivers/nvme/host/fc.c
> @@ -1410,9 +1410,8 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
> }
>
> static void
> -nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
> +nvme_fc_xmt_ls_rsp_free(struct nvmefc_ls_rcv_op *lsop)
> {
> - struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
> struct nvme_fc_rport *rport = lsop->rport;
> struct nvme_fc_lport *lport = rport->lport;
> unsigned long flags;
> @@ -1433,6 +1432,14 @@ nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
> nvme_fc_rport_put(rport);
> }
>
> +static void
> +nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
> +{
> + struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
> +
> + nvme_fc_xmt_ls_rsp_free(lsop);
> +}
> +
> static void
> nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
> {
> @@ -1450,7 +1457,7 @@ nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
> dev_warn(lport->dev,
> "LLDD rejected LS RSP xmt: LS %d status %d\n",
> w0->ls_cmd, ret);
> - nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
> + nvme_fc_xmt_ls_rsp_free(lsop);
> return;
> }
> }
>
Hmm. That is a weird change. 'lsop->lsrsp' clearly _was_ valid just before:
ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
lsop->lsrsp);
if (ret) {
dev_warn(lport->dev,
"LLDD rejected LS RSP xmt: LS %d status %d\n",
w0->ls_cmd, ret);
nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
return;
}
so ->xmt_ls_rsp() would have invalidated one of the arguments.
Plus 'nvme_fc_xmt_ls_rsp_done()' is now a wrapper around
'nvme_fc_xmt_ls_rsp_free()'.
So why not go the full length and kill nvme_fc_xmt_ls_rsp_done()
completely?
Hmm?
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
^ permalink raw reply [flat|nested] 62+ messages in thread* Re: [PATCH v3 18/18] nvme-fc: do not reference lsrsp after failure
2025-03-18 11:33 ` Hannes Reinecke
@ 2025-03-18 14:01 ` Daniel Wagner
0 siblings, 0 replies; 62+ messages in thread
From: Daniel Wagner @ 2025-03-18 14:01 UTC (permalink / raw)
To: Hannes Reinecke
Cc: Daniel Wagner, James Smart, Christoph Hellwig, Sagi Grimberg,
Chaitanya Kulkarni, Keith Busch, linux-nvme, linux-kernel
On Tue, Mar 18, 2025 at 12:33:22PM +0100, Hannes Reinecke wrote:
> ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
> lsop->lsrsp);
> if (ret) {
> dev_warn(lport->dev,
> "LLDD rejected LS RSP xmt: LS %d status %d\n",
> w0->ls_cmd, ret);
> nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
> return;
> }
>
> so ->xmt_ls_rsp() would have invalidated one of the arguments.
> Plus 'nvme_fc_xmt_ls_rsp_done()' is now a wrapper around
> 'nvme_fc_xmt_ls_rsp_free()'.
> So why not go the full length and kill nvme_fc_xmt_ls_rsp_done()
> completely?
nvme_fc_xmt_ls_rsp_done will be called when the LS has been processed.
We still need it.
> Hmm?
It is very confusing with all these callbacks and the name scheme.
^ permalink raw reply [flat|nested] 62+ messages in thread