Linux CXL
 help / color / mirror / Atom feed
From: Ira Weiny <ira.weiny@intel.com>
To: Dan Williams <dan.j.williams@intel.com>, <linux-cxl@vger.kernel.org>
Cc: <ira.weiny@intel.com>
Subject: Re: [PATCH v2 2/4] cxl/pci: Cleanup 'sanitize' to always poll
Date: Fri, 29 Sep 2023 16:49:52 -0700	[thread overview]
Message-ID: <651762a0a9fef_168c4629434@iweiny-mobl.notmuch> (raw)
In-Reply-To: <169602897906.904193.9057960720070253436.stgit@dwillia2-xfh.jf.intel.com>

Dan Williams wrote:
> In preparation for fixing the init/teardown of the 'sanitize' workqueue
> and sysfs notification mechanism, arrange for cxl_mbox_sanitize_work()
> to be the single location where the sysfs attribute is notified. With
> that change there is no distinction between polled mode and interrupt
> mode. All the interrupt does is accelerate the polling interval.
> 
> The change to check for "mds->security.sanitize_node" under the lock is
> there to ensure that the interrupt, the work routine and the
> setup/teardown code can all have a consistent view of the registered
> notifier and the workqueue state. I.e. the expectation is that the
> interrupt is live past the point that the sanitize sysfs attribute is
> published, and it may race teardown, so it must be consulted under a
> lock.
> 
> Lastly, some opportunistic replacements of
> "queue_delayed_work(system_wq, ...)", which is just open coded
> schedule_delayed_work(), are included.
> 
> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
> ---
>  drivers/cxl/core/memdev.c |    3 +-
>  drivers/cxl/cxlmem.h      |    2 --
>  drivers/cxl/pci.c         |   60 +++++++++++++++++++--------------------------
>  3 files changed, 26 insertions(+), 39 deletions(-)
> 
> diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
> index 14b547c07f54..2a7a07f6d165 100644
> --- a/drivers/cxl/core/memdev.c
> +++ b/drivers/cxl/core/memdev.c
> @@ -561,8 +561,7 @@ static void cxl_memdev_security_shutdown(struct device *dev)
>  	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
>  	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
>  
> -	if (mds->security.poll)
> -		cancel_delayed_work_sync(&mds->security.poll_dwork);
> +	cancel_delayed_work_sync(&mds->security.poll_dwork);
>  }
>  
>  static void cxl_memdev_shutdown(struct device *dev)
> diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
> index 706f8a6d1ef4..55f00ad17a77 100644
> --- a/drivers/cxl/cxlmem.h
> +++ b/drivers/cxl/cxlmem.h
> @@ -360,7 +360,6 @@ struct cxl_fw_state {
>   *
>   * @state: state of last security operation
>   * @enabled_cmds: All security commands enabled in the CEL
> - * @poll: polling for sanitization is enabled, device has no mbox irq support
>   * @poll_tmo_secs: polling timeout
>   * @poll_dwork: polling work item
>   * @sanitize_node: sanitation sysfs file to notify
> @@ -368,7 +367,6 @@ struct cxl_fw_state {
>  struct cxl_security_state {
>  	unsigned long state;
>  	DECLARE_BITMAP(enabled_cmds, CXL_SEC_ENABLED_MAX);
> -	bool poll;
>  	int poll_tmo_secs;
>  	struct delayed_work poll_dwork;
>  	struct kernfs_node *sanitize_node;
> diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
> index aa1b3dd9e64c..ac4e434b0806 100644
> --- a/drivers/cxl/pci.c
> +++ b/drivers/cxl/pci.c
> @@ -128,10 +128,10 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
>  	reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
>  	opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
>  	if (opcode == CXL_MBOX_OP_SANITIZE) {
> +		mutex_lock(&mds->mbox_mutex);
>  		if (mds->security.sanitize_node)
> -			sysfs_notify_dirent(mds->security.sanitize_node);
> -
> -		dev_dbg(cxlds->dev, "Sanitization operation ended\n");
> +			mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
> +		mutex_unlock(&mds->mbox_mutex);
>  	} else {
>  		/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
>  		rcuwait_wake_up(&mds->mbox_wait);
> @@ -160,8 +160,7 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
>  		int timeout = mds->security.poll_tmo_secs + 10;
>  
>  		mds->security.poll_tmo_secs = min(15 * 60, timeout);
> -		queue_delayed_work(system_wq, &mds->security.poll_dwork,
> -				   timeout * HZ);
> +		schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
>  	}
>  	mutex_unlock(&mds->mbox_mutex);
>  }
> @@ -293,15 +292,11 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
>  		 * and allow userspace to poll(2) for completion.
>  		 */
>  		if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
> -			if (mds->security.poll) {
> -				/* give first timeout a second */
> -				timeout = 1;
> -				mds->security.poll_tmo_secs = timeout;
> -				queue_delayed_work(system_wq,
> -						   &mds->security.poll_dwork,
> -						   timeout * HZ);
> -			}
> -
> +			/* give first timeout a second */
> +			timeout = 1;
> +			mds->security.poll_tmo_secs = timeout;
> +			schedule_delayed_work(&mds->security.poll_dwork,
> +					      timeout * HZ);
>  			dev_dbg(dev, "Sanitization operation started\n");
>  			goto success;
>  		}
> @@ -384,7 +379,9 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
>  	const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
>  	struct device *dev = cxlds->dev;
>  	unsigned long timeout;
> +	int irq, msgnum;
>  	u64 md_status;
> +	u32 ctrl;
>  
>  	timeout = jiffies + mbox_ready_timeout * HZ;
>  	do {
> @@ -432,33 +429,26 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
>  	dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
>  
>  	rcuwait_init(&mds->mbox_wait);
> +	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);

NIT: The diff hunk below was awkward to review — the restructuring makes the unchanged logic hard to follow in diff form...


>  
> -	if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) {
> -		u32 ctrl;
> -		int irq, msgnum;
> -		struct pci_dev *pdev = to_pci_dev(cxlds->dev);
> -
> -		msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
> -		irq = pci_irq_vector(pdev, msgnum);
> -		if (irq < 0)
> -			goto mbox_poll;
> -
> -		if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL))
> -			goto mbox_poll;
> +	/* background command interrupts are optional */
> +	if ((cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) == 0)
> +		return 0;
>  
> -		/* enable background command mbox irq support */
> -		ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
> -		ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
> -		writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
> +	msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
> +	irq = pci_irq_vector(to_pci_dev(cxlds->dev), msgnum);
> +	if (irq < 0)
> +		return 0;
>  
> +	if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL))
>  		return 0;
> -	}
>  
> -mbox_poll:
> -	mds->security.poll = true;
> -	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
> +	dev_dbg(cxlds->dev, "Mailbox interrupts enabled\n");
> +	/* enable background command mbox irq support */
> +	ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
> +	ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
> +	writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
>  
> -	dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
>  	return 0;
>  }
>  
> 



  reply	other threads:[~2023-09-29 23:50 UTC|newest]

Thread overview: 34+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-09-29 23:09 [PATCH v2 0/4] cxl/mem: Fix shutdown order Dan Williams
2023-09-29 23:09 ` [PATCH v2 1/4] cxl/pci: Remove unnecessary device reference management in sanitize work Dan Williams
2023-09-29 23:41   ` Ira Weiny
2023-10-02  9:55   ` Jonathan Cameron
2023-10-02 15:27   ` Davidlohr Bueso
2023-10-02 16:48   ` Dave Jiang
2023-09-29 23:09 ` [PATCH v2 2/4] cxl/pci: Cleanup 'sanitize' to always poll Dan Williams
2023-09-29 23:49   ` Ira Weiny [this message]
2023-09-29 23:51   ` Ira Weiny
2023-10-02 10:02   ` Jonathan Cameron
2023-10-04  0:55     ` Dan Williams
2023-10-02 15:49   ` Davidlohr Bueso
2023-10-04  1:01     ` Dan Williams
2023-10-04  1:13       ` Davidlohr Bueso
2023-10-02 16:57   ` Dave Jiang
2023-09-29 23:09 ` [PATCH v2 3/4] cxl/pci: Fix sanitize notifier setup Dan Williams
2023-09-30  2:42   ` Ira Weiny
2023-10-02 10:10   ` Jonathan Cameron
2023-10-04  0:59     ` Dan Williams
2023-10-04 10:12       ` Jonathan Cameron
2023-10-04 18:47         ` Dan Williams
2023-10-02 16:59   ` Dave Jiang
2023-10-04  0:52   ` Davidlohr Bueso
2023-10-04  1:09     ` Dan Williams
2023-10-04 16:21       ` Davidlohr Bueso
2023-10-04 18:48         ` Dan Williams
2023-10-04 18:50         ` Dan Williams
2023-10-04 18:54         ` Dan Williams
2023-10-04 19:23           ` Davidlohr Bueso
2023-09-29 23:09 ` [PATCH v2 4/4] cxl/mem: Fix shutdown order Dan Williams
2023-09-29 23:52   ` Ira Weiny
2023-10-02 10:11     ` Jonathan Cameron
2023-10-02 16:59   ` Dave Jiang
2023-10-03 17:40   ` Davidlohr Bueso

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=651762a0a9fef_168c4629434@iweiny-mobl.notmuch \
    --to=ira.weiny@intel.com \
    --cc=dan.j.williams@intel.com \
    --cc=linux-cxl@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox