Linux CXL
* [PATCH] cxl/mbox: Use guard() for mbox_mutex locking
@ 2026-04-28 21:22 Davidlohr Bueso
  2026-04-28 22:38 ` Dave Jiang
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Davidlohr Bueso @ 2026-04-28 21:22 UTC (permalink / raw)
  To: dave.jiang
  Cc: jic23, alison.schofield, ira.weiny, djbw, linux-cxl,
	Davidlohr Bueso

Use the new helpers and simplify the code. No change in
semantics.
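
For context, guard() and scoped_guard() come from the <linux/cleanup.h>
infrastructure: the former releases the lock automatically when the
enclosing scope is left, the latter holds it only across the braced
block. A minimal sketch of the two forms, with made-up names
(example_lock, example_count, example_*()), not code from this patch:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	/* example_lock / example_count are illustrative only */
	static DEFINE_MUTEX(example_lock);
	static int example_count;

	static int example_bump(void)
	{
		/* held from here until return; unlock happens automatically */
		guard(mutex)(&example_lock);
		return ++example_count;
	}

	static void example_reset(void)
	{
		/* held only for the braced block */
		scoped_guard(mutex, &example_lock) {
			example_count = 0;
		}
	}

Both forms tie the unlock to scope exit, which is what lets the explicit
mutex_unlock() calls below go away.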

Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
---
 drivers/cxl/core/memdev.c | 16 ++++++++--------
 drivers/cxl/pci.c         | 21 ++++++++-------------
 2 files changed, 16 insertions(+), 21 deletions(-)

diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 3db4e91170a8..0c9067ced7b3 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -138,10 +138,10 @@ static ssize_t security_state_show(struct device *dev,
 	int rc = 0;
 
 	/* sync with latest submission state */
-	mutex_lock(&cxl_mbox->mbox_mutex);
-	if (mds->security.sanitize_active)
-		rc = sysfs_emit(buf, "sanitize\n");
-	mutex_unlock(&cxl_mbox->mbox_mutex);
+	scoped_guard(mutex, &cxl_mbox->mbox_mutex) {
+		if (mds->security.sanitize_active)
+			rc = sysfs_emit(buf, "sanitize\n");
+	}
 	if (rc)
 		return rc;
 
@@ -1247,10 +1247,10 @@ static void sanitize_teardown_notifier(void *data)
 	 * Prevent new irq triggered invocations of the workqueue and
 	 * flush inflight invocations.
 	 */
-	mutex_lock(&cxl_mbox->mbox_mutex);
-	state = mds->security.sanitize_node;
-	mds->security.sanitize_node = NULL;
-	mutex_unlock(&cxl_mbox->mbox_mutex);
+	scoped_guard(mutex, &cxl_mbox->mbox_mutex) {
+		state = mds->security.sanitize_node;
+		mds->security.sanitize_node = NULL;
+	}
 
 	cancel_delayed_work_sync(&mds->security.poll_dwork);
 	sysfs_put(state);
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 95bf773aab14..3462cea6e61b 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -134,10 +134,11 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
 	reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
 	opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
 	if (opcode == CXL_MBOX_OP_SANITIZE) {
-		mutex_lock(&cxl_mbox->mbox_mutex);
-		if (mds->security.sanitize_node)
-			mod_delayed_work(system_percpu_wq, &mds->security.poll_dwork, 0);
-		mutex_unlock(&cxl_mbox->mbox_mutex);
+		scoped_guard(mutex, &cxl_mbox->mbox_mutex) {
+			if (mds->security.sanitize_node)
+				mod_delayed_work(system_percpu_wq,
+						 &mds->security.poll_dwork, 0);
+		}
 	} else {
 		/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
 		rcuwait_wake_up(&cxl_mbox->mbox_wait);
@@ -156,7 +157,7 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
 	struct cxl_dev_state *cxlds = &mds->cxlds;
 	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
 
-	mutex_lock(&cxl_mbox->mbox_mutex);
+	guard(mutex)(&cxl_mbox->mbox_mutex);
 	if (cxl_mbox_background_complete(cxlds)) {
 		mds->security.poll_tmo_secs = 0;
 		if (mds->security.sanitize_node)
@@ -170,7 +171,6 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
 		mds->security.poll_tmo_secs = min(15 * 60, timeout);
 		schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
 	}
-	mutex_unlock(&cxl_mbox->mbox_mutex);
 }
 
 /**
@@ -377,13 +377,8 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mailbox *cxl_mbox,
 static int cxl_pci_mbox_send(struct cxl_mailbox *cxl_mbox,
 			     struct cxl_mbox_cmd *cmd)
 {
-	int rc;
-
-	mutex_lock(&cxl_mbox->mbox_mutex);
-	rc = __cxl_pci_mbox_send_cmd(cxl_mbox, cmd);
-	mutex_unlock(&cxl_mbox->mbox_mutex);
-
-	return rc;
+	guard(mutex)(&cxl_mbox->mbox_mutex);
+	return __cxl_pci_mbox_send_cmd(cxl_mbox, cmd);
 }
 
 static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)
-- 
2.39.5



* Re: [PATCH] cxl/mbox: Use guard() for mbox_mutex locking
  2026-04-28 21:22 [PATCH] cxl/mbox: Use guard() for mbox_mutex locking Davidlohr Bueso
@ 2026-04-28 22:38 ` Dave Jiang
  2026-04-28 22:47   ` Davidlohr Bueso
  2026-04-29 10:53 ` Jonathan Cameron
  2026-04-29 17:59 ` Davidlohr Bueso
  2 siblings, 1 reply; 7+ messages in thread
From: Dave Jiang @ 2026-04-28 22:38 UTC (permalink / raw)
  To: Davidlohr Bueso; +Cc: jic23, alison.schofield, ira.weiny, djbw, linux-cxl



On 4/28/26 2:22 PM, Davidlohr Bueso wrote:
> Use the new helpers and simplify the code. No change in
> semantics.
> 
> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>

Reviewed-by: Dave Jiang <dave.jiang@intel.com>

Is this in prep for media ops?

> ---
>  drivers/cxl/core/memdev.c | 16 ++++++++--------
>  drivers/cxl/pci.c         | 21 ++++++++-------------
>  2 files changed, 16 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
> index 3db4e91170a8..0c9067ced7b3 100644
> --- a/drivers/cxl/core/memdev.c
> +++ b/drivers/cxl/core/memdev.c
> @@ -138,10 +138,10 @@ static ssize_t security_state_show(struct device *dev,
>  	int rc = 0;
>  
>  	/* sync with latest submission state */
> -	mutex_lock(&cxl_mbox->mbox_mutex);
> -	if (mds->security.sanitize_active)
> -		rc = sysfs_emit(buf, "sanitize\n");
> -	mutex_unlock(&cxl_mbox->mbox_mutex);
> +	scoped_guard(mutex, &cxl_mbox->mbox_mutex) {
> +		if (mds->security.sanitize_active)
> +			rc = sysfs_emit(buf, "sanitize\n");
> +	}
>  	if (rc)
>  		return rc;
>  
> @@ -1247,10 +1247,10 @@ static void sanitize_teardown_notifier(void *data)
>  	 * Prevent new irq triggered invocations of the workqueue and
>  	 * flush inflight invocations.
>  	 */
> -	mutex_lock(&cxl_mbox->mbox_mutex);
> -	state = mds->security.sanitize_node;
> -	mds->security.sanitize_node = NULL;
> -	mutex_unlock(&cxl_mbox->mbox_mutex);
> +	scoped_guard(mutex, &cxl_mbox->mbox_mutex) {
> +		state = mds->security.sanitize_node;
> +		mds->security.sanitize_node = NULL;
> +	}
>  
>  	cancel_delayed_work_sync(&mds->security.poll_dwork);
>  	sysfs_put(state);
> diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
> index 95bf773aab14..3462cea6e61b 100644
> --- a/drivers/cxl/pci.c
> +++ b/drivers/cxl/pci.c
> @@ -134,10 +134,11 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
>  	reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
>  	opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
>  	if (opcode == CXL_MBOX_OP_SANITIZE) {
> -		mutex_lock(&cxl_mbox->mbox_mutex);
> -		if (mds->security.sanitize_node)
> -			mod_delayed_work(system_percpu_wq, &mds->security.poll_dwork, 0);
> -		mutex_unlock(&cxl_mbox->mbox_mutex);
> +		scoped_guard(mutex, &cxl_mbox->mbox_mutex) {
> +			if (mds->security.sanitize_node)
> +				mod_delayed_work(system_percpu_wq,
> +						 &mds->security.poll_dwork, 0);
> +		}
>  	} else {
>  		/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
>  		rcuwait_wake_up(&cxl_mbox->mbox_wait);
> @@ -156,7 +157,7 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
>  	struct cxl_dev_state *cxlds = &mds->cxlds;
>  	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
>  
> -	mutex_lock(&cxl_mbox->mbox_mutex);
> +	guard(mutex)(&cxl_mbox->mbox_mutex);
>  	if (cxl_mbox_background_complete(cxlds)) {
>  		mds->security.poll_tmo_secs = 0;
>  		if (mds->security.sanitize_node)
> @@ -170,7 +171,6 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
>  		mds->security.poll_tmo_secs = min(15 * 60, timeout);
>  		schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
>  	}
> -	mutex_unlock(&cxl_mbox->mbox_mutex);
>  }
>  
>  /**
> @@ -377,13 +377,8 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mailbox *cxl_mbox,
>  static int cxl_pci_mbox_send(struct cxl_mailbox *cxl_mbox,
>  			     struct cxl_mbox_cmd *cmd)
>  {
> -	int rc;
> -
> -	mutex_lock(&cxl_mbox->mbox_mutex);
> -	rc = __cxl_pci_mbox_send_cmd(cxl_mbox, cmd);
> -	mutex_unlock(&cxl_mbox->mbox_mutex);
> -
> -	return rc;
> +	guard(mutex)(&cxl_mbox->mbox_mutex);
> +	return __cxl_pci_mbox_send_cmd(cxl_mbox, cmd);
>  }
>  
>  static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)



* Re: [PATCH] cxl/mbox: Use guard() for mbox_mutex locking
  2026-04-28 22:38 ` Dave Jiang
@ 2026-04-28 22:47   ` Davidlohr Bueso
  2026-04-29 15:35     ` Dave Jiang
  0 siblings, 1 reply; 7+ messages in thread
From: Davidlohr Bueso @ 2026-04-28 22:47 UTC (permalink / raw)
  To: Dave Jiang; +Cc: jic23, alison.schofield, ira.weiny, djbw, linux-cxl

On Tue, 28 Apr 2026, Dave Jiang wrote:

>
>On 4/28/26 2:22 PM, Davidlohr Bueso wrote:
>> Use the new helpers and simplify the code. No change in
>> semantics.
>>
>> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
>
>Reviewed-by: Dave Jiang <dave.jiang@intel.com>
>
>Is this in prep of media ops?

I found the open coded locking by doing the media ops
patch, but ultimately these changes are independent.

Thanks,
Davidlohr


* Re: [PATCH] cxl/mbox: Use guard() for mbox_mutex locking
  2026-04-28 21:22 [PATCH] cxl/mbox: Use guard() for mbox_mutex locking Davidlohr Bueso
  2026-04-28 22:38 ` Dave Jiang
@ 2026-04-29 10:53 ` Jonathan Cameron
  2026-04-29 17:59 ` Davidlohr Bueso
  2 siblings, 0 replies; 7+ messages in thread
From: Jonathan Cameron @ 2026-04-29 10:53 UTC (permalink / raw)
  To: Davidlohr Bueso; +Cc: dave.jiang, alison.schofield, ira.weiny, djbw, linux-cxl

On Tue, 28 Apr 2026 14:22:20 -0700
Davidlohr Bueso <dave@stgolabs.net> wrote:

> Use the new helpers and simplify the code. No change in
> semantics.
Does it simplify the code?  I'm not sure these particular
ones are that helpful.

Don't get me wrong, I love guards() and using them in new
code is fine but I'm not sure it's worth the churn if we don't
see a significant advantage.

Some specific comments inline.  I think with a few tweaks
the advantages become greater and outweigh the churn aspect
- others may disagree!

> 
> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
> ---
>  drivers/cxl/core/memdev.c | 16 ++++++++--------
>  drivers/cxl/pci.c         | 21 ++++++++-------------
>  2 files changed, 16 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
> index 3db4e91170a8..0c9067ced7b3 100644
> --- a/drivers/cxl/core/memdev.c
> +++ b/drivers/cxl/core/memdev.c
> @@ -138,10 +138,10 @@ static ssize_t security_state_show(struct device *dev,
>  	int rc = 0;
>  
>  	/* sync with latest submission state */
> -	mutex_lock(&cxl_mbox->mbox_mutex);
> -	if (mds->security.sanitize_active)
> -		rc = sysfs_emit(buf, "sanitize\n");
> -	mutex_unlock(&cxl_mbox->mbox_mutex);
> +	scoped_guard(mutex, &cxl_mbox->mbox_mutex) {
> +		if (mds->security.sanitize_active)
> +			rc = sysfs_emit(buf, "sanitize\n");
> +	}
>  	if (rc)
>  		return rc;
As it stands, if anything this is worse from a readability point of view,
as the setting of rc is getting further away.
However... if we take it a step further I think it would be worth doing.

	scoped_guard(mutex, &cxl_mbox->mbox_mutex) {
		if (mds->security.sanitize_active) {
			rc = sysfs_emit(buf, "sanitize\n");
			if (rc)
				return rc;
		}
	}
+ can drop the rc init at the top.
That way the scoped_guard() is letting us move the error check nearer
the source of the error and is a win.

>  
> @@ -1247,10 +1247,10 @@ static void sanitize_teardown_notifier(void *data)
>  	 * Prevent new irq triggered invocations of the workqueue and
>  	 * flush inflight invocations.
>  	 */
> -	mutex_lock(&cxl_mbox->mbox_mutex);
> -	state = mds->security.sanitize_node;
> -	mds->security.sanitize_node = NULL;
> -	mutex_unlock(&cxl_mbox->mbox_mutex);
> +	scoped_guard(mutex, &cxl_mbox->mbox_mutex) {
> +		state = mds->security.sanitize_node;
> +		mds->security.sanitize_node = NULL;
> +	}

This one is churn for me. But if it's the only manual
mutex handling left in the file, fair enough.

>  
>  	cancel_delayed_work_sync(&mds->security.poll_dwork);
>  	sysfs_put(state);
> diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
> index 95bf773aab14..3462cea6e61b 100644
> --- a/drivers/cxl/pci.c
> +++ b/drivers/cxl/pci.c
> @@ -134,10 +134,11 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
>  	reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
>  	opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
>  	if (opcode == CXL_MBOX_OP_SANITIZE) {
> -		mutex_lock(&cxl_mbox->mbox_mutex);
> -		if (mds->security.sanitize_node)
> -			mod_delayed_work(system_percpu_wq, &mds->security.poll_dwork, 0);
> -		mutex_unlock(&cxl_mbox->mbox_mutex);
> +		scoped_guard(mutex, &cxl_mbox->mbox_mutex) {
> +			if (mds->security.sanitize_node)
> +				mod_delayed_work(system_percpu_wq,
> +						 &mds->security.poll_dwork, 0);

This one I dislike because the indent is getting larger for no
major readability advantage. Given the scope is tightly defined anyway, how about
instead doing:

	if (opcode == CXL_MBOX_OP_SANITIZE) {
		guard(mutex)(&cxl_mbox->mbox_mutex);
		if (mds->security.sanitize_node)
			mod_delayed_work(system_percpu_wq, &mds->security.poll_dwork, 0);
>  	} else {
>  		/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
>  		rcuwait_wake_up(&cxl_mbox->mbox_wait);
> @@ -156,7 +157,7 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
>  	struct cxl_dev_state *cxlds = &mds->cxlds;
>  	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
>  
> -	mutex_lock(&cxl_mbox->mbox_mutex);
> +	guard(mutex)(&cxl_mbox->mbox_mutex);
>  	if (cxl_mbox_background_complete(cxlds)) {
>  		mds->security.poll_tmo_secs = 0;
>  		if (mds->security.sanitize_node)
> @@ -170,7 +171,6 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
>  		mds->security.poll_tmo_secs = min(15 * 60, timeout);
>  		schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
>  	}
> -	mutex_unlock(&cxl_mbox->mbox_mutex);
>  }
>  
>  /**
> @@ -377,13 +377,8 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mailbox *cxl_mbox,
>  static int cxl_pci_mbox_send(struct cxl_mailbox *cxl_mbox,
>  			     struct cxl_mbox_cmd *cmd)
>  {
> -	int rc;
> -
> -	mutex_lock(&cxl_mbox->mbox_mutex);
> -	rc = __cxl_pci_mbox_send_cmd(cxl_mbox, cmd);
> -	mutex_unlock(&cxl_mbox->mbox_mutex);
> -
> -	return rc;
> +	guard(mutex)(&cxl_mbox->mbox_mutex);
> +	return __cxl_pci_mbox_send_cmd(cxl_mbox, cmd);
I like these really simple ones just for saving lines of code.
So this is good as far as I'm concerned.

>  }
>  
>  static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)



* Re: [PATCH] cxl/mbox: Use guard() for mbox_mutex locking
  2026-04-28 22:47   ` Davidlohr Bueso
@ 2026-04-29 15:35     ` Dave Jiang
  2026-04-29 16:43       ` Davidlohr Bueso
  0 siblings, 1 reply; 7+ messages in thread
From: Dave Jiang @ 2026-04-29 15:35 UTC (permalink / raw)
  To: Davidlohr Bueso; +Cc: jic23, alison.schofield, ira.weiny, djbw, linux-cxl



On 4/28/26 3:47 PM, Davidlohr Bueso wrote:
> On Tue, 28 Apr 2026, Dave Jiang wrote:
> 
>>
>> On 4/28/26 2:22 PM, Davidlohr Bueso wrote:
>>> Use the new helpers and simplify the code. No change in
>>> semantics.
>>>
>>> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
>>
>> Reviewed-by: Dave Jiang <dave.jiang@intel.com>
>>
>> Is this in prep of media ops?
> 
> I found the open coded locking by doing the media ops
> patch, but ultimately these changes are independent.

May not be worth the churn if it's not part of ongoing work.

> 
> Thanks,
> Davidlohr



* Re: [PATCH] cxl/mbox: Use guard() for mbox_mutex locking
  2026-04-29 15:35     ` Dave Jiang
@ 2026-04-29 16:43       ` Davidlohr Bueso
  0 siblings, 0 replies; 7+ messages in thread
From: Davidlohr Bueso @ 2026-04-29 16:43 UTC (permalink / raw)
  To: Dave Jiang; +Cc: jic23, alison.schofield, ira.weiny, djbw, linux-cxl

On Wed, 29 Apr 2026, Dave Jiang wrote:

>May not be worth the churn if it's not part of ongoing work.

I don't disagree, and considering Jonathan is not fond of
the change either, let's just drop the patch altogether.

Thanks,
Davidlohr


* Re: [PATCH] cxl/mbox: Use guard() for mbox_mutex locking
  2026-04-28 21:22 [PATCH] cxl/mbox: Use guard() for mbox_mutex locking Davidlohr Bueso
  2026-04-28 22:38 ` Dave Jiang
  2026-04-29 10:53 ` Jonathan Cameron
@ 2026-04-29 17:59 ` Davidlohr Bueso
  2 siblings, 0 replies; 7+ messages in thread
From: Davidlohr Bueso @ 2026-04-29 17:59 UTC (permalink / raw)
  To: dave.jiang; +Cc: jic23, alison.schofield, ira.weiny, djbw, linux-cxl

On Tue, 28 Apr 2026, Davidlohr Bueso wrote:

>Use the new helpers and simplify the code. No change in
>semantics.
>
>Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
>---
> drivers/cxl/core/memdev.c | 16 ++++++++--------
> drivers/cxl/pci.c         | 21 ++++++++-------------
> 2 files changed, 16 insertions(+), 21 deletions(-)
>
>diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
>index 3db4e91170a8..0c9067ced7b3 100644
>--- a/drivers/cxl/core/memdev.c
>+++ b/drivers/cxl/core/memdev.c
>@@ -138,10 +138,10 @@ static ssize_t security_state_show(struct device *dev,
> 	int rc = 0;
>
> 	/* sync with latest submission state */
>-	mutex_lock(&cxl_mbox->mbox_mutex);
>-	if (mds->security.sanitize_active)
>-		rc = sysfs_emit(buf, "sanitize\n");
>-	mutex_unlock(&cxl_mbox->mbox_mutex);
>+	scoped_guard(mutex, &cxl_mbox->mbox_mutex) {
>+		if (mds->security.sanitize_active)
>+			rc = sysfs_emit(buf, "sanitize\n");
>+	}
> 	if (rc)
> 		return rc;
>
>@@ -1247,10 +1247,10 @@ static void sanitize_teardown_notifier(void *data)
> 	 * Prevent new irq triggered invocations of the workqueue and
> 	 * flush inflight invocations.
> 	 */
>-	mutex_lock(&cxl_mbox->mbox_mutex);
>-	state = mds->security.sanitize_node;
>-	mds->security.sanitize_node = NULL;
>-	mutex_unlock(&cxl_mbox->mbox_mutex);
>+	scoped_guard(mutex, &cxl_mbox->mbox_mutex) {
>+		state = mds->security.sanitize_node;
>+		mds->security.sanitize_node = NULL;
>+	}
>
> 	cancel_delayed_work_sync(&mds->security.poll_dwork);
> 	sysfs_put(state);
>diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
>index 95bf773aab14..3462cea6e61b 100644
>--- a/drivers/cxl/pci.c
>+++ b/drivers/cxl/pci.c
>@@ -134,10 +134,11 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
> 	reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
> 	opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
> 	if (opcode == CXL_MBOX_OP_SANITIZE) {
>-		mutex_lock(&cxl_mbox->mbox_mutex);
>-		if (mds->security.sanitize_node)
>-			mod_delayed_work(system_percpu_wq, &mds->security.poll_dwork, 0);
>-		mutex_unlock(&cxl_mbox->mbox_mutex);
>+		scoped_guard(mutex, &cxl_mbox->mbox_mutex) {
>+			if (mds->security.sanitize_node)
>+				mod_delayed_work(system_percpu_wq,
>+						 &mds->security.poll_dwork, 0);
>+		}
> 	} else {
> 		/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
> 		rcuwait_wake_up(&cxl_mbox->mbox_wait);
>@@ -156,7 +157,7 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
> 	struct cxl_dev_state *cxlds = &mds->cxlds;
> 	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
>
>-	mutex_lock(&cxl_mbox->mbox_mutex);
>+	guard(mutex)(&cxl_mbox->mbox_mutex);
> 	if (cxl_mbox_background_complete(cxlds)) {
> 		mds->security.poll_tmo_secs = 0;
> 		if (mds->security.sanitize_node)
>@@ -170,7 +171,6 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
> 		mds->security.poll_tmo_secs = min(15 * 60, timeout);
> 		schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);

Sashiko had some feedback, which turned out to be a false concern. Copy-pasting it
here as I don't know how to reply to it yet.

> This isn't a regression introduced by this patch, but looking at this code,
> can this unconditionally re-arm the delayed work during teardown and cause
> a use-after-free?
>
> During device teardown, sanitize_teardown_notifier() clears the sanitize_node
> to NULL under the mbox_mutex and then drops the lock to call
> cancel_delayed_work_sync().
>
> If cxl_mbox_sanitize_work() is executing concurrently,
> cancel_delayed_work_sync() will wait for it to finish. Meanwhile, the work
> function acquires mbox_mutex, sees the command is not complete, and
> calls schedule_delayed_work():
> 
> CPU1
> sanitize_teardown_notifier()
>    mds->security.sanitize_node = NULL;
>    cancel_delayed_work_sync(&mds->security.poll_dwork);
> CPU2
> cxl_mbox_sanitize_work()
>    guard(mutex)(&cxl_mbox->mbox_mutex);
>    ...
>    schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ)

This is fine.

cancel_delayed_work_sync() uses WORK_CANCEL_DISABLE to bump the work's
WORK_OFFQ_DISABLE_MASK and clears the pending bit.

queue_delayed_work_on() checks the disable mask via clear_pending_if_disabled(),
between setting PENDING and adding the new work. If disabled, PENDING is cleared
again and __queue_delayed_work is not called.
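
To make the ordering concrete, here is a minimal sketch of the pattern in
question (my_dev / my_poll_work / my_teardown are made-up names, not CXL
code): a self-rearming delayed work plus a teardown path that relies on
cancel_delayed_work_sync() to defeat the rearm:

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	struct my_dev {
		struct delayed_work poll_dwork;
		bool done;
	};

	static void my_poll_work(struct work_struct *work)
	{
		struct delayed_work *dwork = to_delayed_work(work);
		struct my_dev *d = container_of(dwork, struct my_dev, poll_dwork);

		if (!d->done)
			/*
			 * Self-rearm. If a concurrent cancel_delayed_work_sync()
			 * has already disabled the work, this queueing attempt
			 * is dropped by clear_pending_if_disabled().
			 */
			schedule_delayed_work(&d->poll_dwork, HZ);
	}

	static void my_teardown(struct my_dev *d)
	{
		/*
		 * Waits for an in-flight my_poll_work() and guarantees the
		 * work is neither pending nor executing on return, even if
		 * the handler tried to rearm itself in the meantime.
		 */
		cancel_delayed_work_sync(&d->poll_dwork);
		kfree(d);
	}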

> 
> Unlike the IRQ handler which safely checks if (mds->security.sanitize_node)
> before re-arming, the work function lacks this teardown check.

sanitize_node does not dictate correctness for this; it is about the status
file.

> 
> Does this result in the delayed work remaining armed after
> cancel_delayed_work_sync() returns, eventually executing on freed mds memory
> when the timer expires?

No, cancel_delayed_work_sync() is explicitly documented to handle rearming work.

Thanks,
Davidlohr
