From: Ulf Hansson <ulf.hansson@linaro.org>
To: linux-mmc@vger.kernel.org, Ulf Hansson <ulf.hansson@linaro.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>,
Linus Walleij <linus.walleij@linaro.org>,
Wolfram Sang <wsa+renesas@sang-engineering.com>,
Shawn Lin <shawn.lin@rock-chips.com>,
Christian Löhle <CLoehle@hyperstone.com>,
linux-kernel@vger.kernel.org
Subject: [PATCH 3/3] mmc: core: Avoid hogging the CPU while polling for busy after I/O writes
Date: Fri, 2 Jul 2021 15:42:29 +0200 [thread overview]
Message-ID: <20210702134229.357717-4-ulf.hansson@linaro.org> (raw)
In-Reply-To: <20210702134229.357717-1-ulf.hansson@linaro.org>
When mmc_blk_card_busy() calls card_busy_detect() to poll for the card's
state with CMD13, this is done without any delays in between the commands
being sent.
Rather than fixing card_busy_detect() in this regard, let's instead
convert to using the common __mmc_poll_for_busy(), which also helps us
avoid open-coding.
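For context, the conversion boils down to the pattern below (a minimal
sketch assuming the driver context from the diff that follows; it is not
part of the patch itself): the caller hands __mmc_poll_for_busy() a
callback plus private data, and the helper owns the timeout handling and
the delays between CMD13 iterations.

	struct mmc_blk_busy_data cb_data = { .card = card, .status = 0 };
	int err;

	/*
	 * mmc_blk_busy_cb() sends CMD13, ORs any response error bits into
	 * cb_data.status and reports via *busy whether the card is still
	 * busy; __mmc_poll_for_busy() keeps invoking it, backing off in
	 * between, until the card is ready or MMC_BLK_TIMEOUT_MS expires.
	 */
	err = __mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS,
				  &mmc_blk_busy_cb, &cb_data);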
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
---
drivers/mmc/core/block.c | 69 ++++++++++++++++----------------------
drivers/mmc/core/mmc_ops.c | 1 +
2 files changed, 30 insertions(+), 40 deletions(-)
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 0f9044cf3aab..c692f2af77f2 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -97,6 +97,11 @@ static int max_devices;
static DEFINE_IDA(mmc_blk_ida);
static DEFINE_IDA(mmc_rpmb_ida);
+struct mmc_blk_busy_data {
+ struct mmc_card *card;
+ u32 status;
+};
+
/*
* There is one mmc_blk_data per slot.
*/
@@ -411,42 +416,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
return 0;
}
-static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
- u32 *resp_errs)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
- int err = 0;
- u32 status;
-
- do {
- bool done = time_after(jiffies, timeout);
-
- err = __mmc_send_status(card, &status, 5);
- if (err) {
- dev_err(mmc_dev(card->host),
- "error %d requesting status\n", err);
- return err;
- }
-
- /* Accumulate any response error bits seen */
- if (resp_errs)
- *resp_errs |= status;
-
- /*
- * Timeout if the device never becomes ready for data and never
- * leaves the program state.
- */
- if (done) {
- dev_err(mmc_dev(card->host),
- "Card stuck in wrong state! %s status: %#x\n",
- __func__, status);
- return -ETIMEDOUT;
- }
- } while (!mmc_ready_for_data(status));
-
- return err;
-}
-
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_blk_ioc_data *idata)
{
@@ -1846,28 +1815,48 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
}
+static int mmc_blk_busy_cb(void *cb_data, bool *busy)
+{
+ struct mmc_blk_busy_data *data = cb_data;
+ u32 status = 0;
+ int err;
+
+ err = mmc_send_status(data->card, &status);
+ if (err)
+ return err;
+
+ /* Accumulate response error bits. */
+ data->status |= status;
+
+ *busy = !mmc_ready_for_data(status);
+ return 0;
+}
+
static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
- u32 status = 0;
+ struct mmc_blk_busy_data cb_data;
int err;
if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
return 0;
- err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status);
+ cb_data.card = card;
+ cb_data.status = 0;
+ err = __mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS, &mmc_blk_busy_cb,
+ &cb_data);
/*
* Do not assume data transferred correctly if there are any error bits
* set.
*/
- if (status & mmc_blk_stop_err_bits(&mqrq->brq)) {
+ if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) {
mqrq->brq.data.bytes_xfered = 0;
err = err ? err : -EIO;
}
/* Copy the exception bit so it will be seen later on */
- if (mmc_card_mmc(card) && status & R1_EXCEPTION_EVENT)
+ if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT)
mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;
return err;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index e2c431c0ce5d..90d213a2203f 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -510,6 +510,7 @@ int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
return 0;
}
+EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
--
2.25.1
Thread overview: 6+ messages
2021-07-02 13:42 [PATCH 0/3] mmc: core: Avoid hogging the CPU while polling for busy Ulf Hansson
2021-07-02 13:42 ` [PATCH 1/3] mmc: core: Avoid hogging the CPU while polling for busy in the I/O err path Ulf Hansson
2021-07-02 13:42 ` [PATCH 2/3] mmc: core: Avoid hogging the CPU while polling for busy for mmc ioctls Ulf Hansson
2021-07-02 13:42 ` Ulf Hansson [this message]
2021-07-06 7:05 ` [PATCH 0/3] mmc: core: Avoid hogging the CPU while polling for busy Shawn Lin
2021-08-04 11:34 ` Ulf Hansson