From: ira.weiny@intel.com
To: Dan Williams <dan.j.williams@intel.com>,
Bjorn Helgaas <bhelgaas@google.com>
Cc: Ira Weiny <ira.weiny@intel.com>, Lukas Wunner <lukas@wunner.de>,
Alison Schofield <alison.schofield@intel.com>,
Vishal Verma <vishal.l.verma@intel.com>,
Jonathan Cameron <Jonathan.Cameron@huawei.com>,
Gregory Price <gregory.price@memverge.com>,
"Li, Ming" <ming4.li@intel.com>,
linux-pci@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-cxl@vger.kernel.org
Subject: [PATCH V2 1/2] PCI/DOE: Remove the pci_doe_flush_mb() call
Date: Tue, 22 Nov 2022 07:53:23 -0800
Message-ID: <20221122155324.1878416-2-ira.weiny@intel.com>
In-Reply-To: <20221122155324.1878416-1-ira.weiny@intel.com>
From: Ira Weiny <ira.weiny@intel.com>

Each struct doe_mb is managed as part of its PCI device and cannot go
away as long as that PCI device exists.  pci_doe_flush_mb() was set up
to flush the workqueue and prevent further submissions to the mailbox
when the PCI device goes away.  Unfortunately, that scheme was
fundamentally flawed: nothing guaranteed that the struct doe_mb was
still valid after pci_doe_flush_mb() returned, so the doe_mb state
could already be invalid by the time the threads waiting on the
workqueue were flushed.

Fortunately, the current code is safe because all callers make a
synchronous call to pci_doe_submit_task() and hold a reference on the
PCI device for the duration of that call.
For these reasons, pci_doe_flush_mb() can never be called while tasks
are being processed, and it serves no purpose.

Remove the dead code around pci_doe_flush_mb(), including the
PCI_DOE_FLAG_CANCEL handling.  With the cancel path gone, pci_doe_wait()
is only a delay, so replace it with msleep_interruptible() and express
PCI_DOE_POLL_INTERVAL in milliseconds accordingly.

Signed-off-by: Ira Weiny <ira.weiny@intel.com>
---
drivers/pci/doe.c | 48 ++++-------------------------------------------
1 file changed, 4 insertions(+), 44 deletions(-)
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index e402f05068a5..260313e9052e 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -24,10 +24,9 @@
/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
#define PCI_DOE_TIMEOUT HZ
-#define PCI_DOE_POLL_INTERVAL (PCI_DOE_TIMEOUT / 128)
+#define PCI_DOE_POLL_INTERVAL 8
-#define PCI_DOE_FLAG_CANCEL 0
-#define PCI_DOE_FLAG_DEAD 1
+#define PCI_DOE_FLAG_DEAD 0
/**
* struct pci_doe_mb - State for a single DOE mailbox
@@ -53,15 +52,6 @@ struct pci_doe_mb {
unsigned long flags;
};
-static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout)
-{
- if (wait_event_timeout(doe_mb->wq,
- test_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags),
- timeout))
- return -EIO;
- return 0;
-}
-
static void pci_doe_write_ctrl(struct pci_doe_mb *doe_mb, u32 val)
{
struct pci_dev *pdev = doe_mb->pdev;
@@ -82,12 +72,9 @@ static int pci_doe_abort(struct pci_doe_mb *doe_mb)
pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_ABORT);
do {
- int rc;
u32 val;
- rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
- if (rc)
- return rc;
+ msleep_interruptible(PCI_DOE_POLL_INTERVAL);
pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
/* Abort success! */
@@ -278,11 +265,7 @@ static void doe_statemachine_work(struct work_struct *work)
signal_task_abort(task, -EIO);
return;
}
- rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
- if (rc) {
- signal_task_abort(task, rc);
- return;
- }
+ msleep_interruptible(PCI_DOE_POLL_INTERVAL);
goto retry_resp;
}
@@ -383,21 +366,6 @@ static void pci_doe_destroy_workqueue(void *mb)
destroy_workqueue(doe_mb->work_queue);
}
-static void pci_doe_flush_mb(void *mb)
-{
- struct pci_doe_mb *doe_mb = mb;
-
- /* Stop all pending work items from starting */
- set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);
-
- /* Cancel an in progress work item, if necessary */
- set_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags);
- wake_up(&doe_mb->wq);
-
- /* Flush all work items */
- flush_workqueue(doe_mb->work_queue);
-}
-
/**
* pcim_doe_create_mb() - Create a DOE mailbox object
*
@@ -450,14 +418,6 @@ struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset)
return ERR_PTR(rc);
}
- /*
- * The state machine and the mailbox should be in sync now;
- * Set up mailbox flush prior to using the mailbox to query protocols.
- */
- rc = devm_add_action_or_reset(dev, pci_doe_flush_mb, doe_mb);
- if (rc)
- return ERR_PTR(rc);
-
rc = pci_doe_cache_protocols(doe_mb);
if (rc) {
pci_err(pdev, "[%x] failed to cache protocols : %d\n",
--
2.37.2