public inbox for linux-block@vger.kernel.org
 help / color / mirror / Atom feed
From: Shawn Lin <shawn.lin@linux.dev>
To: Bin Liu <b-liu@ti.com>
Cc: shawn.lin@linux.dev, ulf.hansson@linaro.org, axboe@kernel.dk,
	linux-block@vger.kernel.org, linux-mmc@vger.kernel.org
Subject: Re: [PATCH] mmc: block: use single block write in retry
Date: Wed, 18 Mar 2026 09:43:10 +0800	[thread overview]
Message-ID: <1806373b-d402-5655-94d4-5cf7142e1382@linux.dev> (raw)
In-Reply-To: <20260317132454.b6i7wp763yj36d3v@iaqt7>

在 2026/03/17 星期二 21:24, Bin Liu 写道:
> Hi Shawn,
> 
> On Tue, Mar 17, 2026 at 08:46:40PM +0800, Shawn Lin wrote:
>> 在 2026/03/11 星期三 0:04, Bin Liu 写道:
>>> Due to errata i2493[0], multi-block write would still fail in retries.
>>>
>>> This patch reuses recovery_mode flag, and switches to single-block
>>> write in retry when multi-block write fails. It covers both CQE and
>>> non-CQE cases.
>>
>> MMC core natively retries single for multi block read. So I assume
>> what you are trying to do is make it the same for write path. We didn't
> 
> Yes, I am trying something similar for the write path, but
> 
>> resort to upper block layer for help for read case in the past, so  I
>> think you could still achieve that within the scope of MMC core.
> 
> unlike the read path solution, I didn't solve the issue within the scope
> of MMC core, because it would introduce more code than this patch does,
> especially the CQE and non-CQE cases have different failure path. (I
> didn't try to figure out how the single block read retry covers both
> failure cases...)
> 
> I feel the solution in this patch is more clever with less code
> introduced.
> 
>>
>> More importantly, what is the failure rate of this issue? If it occurs
> 
> Very rare. For example, on the EVM, the issue can only be triggered by
> writing 3 or more blocks of data pattern 0xFF00.

I manually modified cqhci and sdhci to inject errors for multi-block
writes under both CQE and non-CQE modes. The following patch appears to
work in my testing. Could you please test it?

diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 05ee76cb0a08..ca60991a3305 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1642,13 +1642,13 @@ static int mmc_blk_cqe_issue_flush(struct 
mmc_queue *mq, struct request *req)
  	return mmc_blk_cqe_start_req(mq->card->host, mrq);
  }

-static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request 
*req)
+static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request 
*req, int recovery_mode)
  {
  	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
  	struct mmc_host *host = mq->card->host;
  	int err;

-	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
+	mmc_blk_rw_rq_prep(mqrq, mq->card, recovery_mode, mq);
  	mqrq->brq.mrq.done = mmc_blk_hsq_req_done;
  	mmc_pre_req(host, &mqrq->brq.mrq);

@@ -1663,13 +1663,23 @@ static int mmc_blk_cqe_issue_rw_rq(struct 
mmc_queue *mq, struct request *req)
  {
  	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
  	struct mmc_host *host = mq->card->host;
+	int err;

-	if (host->hsq_enabled)
-		return mmc_blk_hsq_issue_rw_rq(mq, req);
+	if (host->hsq_enabled) {
+		err = mmc_blk_hsq_issue_rw_rq(mq, req, 0);
+		if (err)
+			return mmc_blk_hsq_issue_rw_rq(mq, req, 1);
+	}

  	mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);

-	return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
+	err = mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
+	if (err) {
+		mmc_blk_data_prep(mq, mqrq, 1, NULL, NULL);
+		return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
+	}
+
+	return 0;
  }

  static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1776,24 +1786,26 @@ static int mmc_blk_fix_state(struct mmc_card 
*card, struct request *req)
  	return err;
  }

-#define MMC_READ_SINGLE_RETRIES	2
+#define MMC_RW_SINGLE_RETRIES	2

-/* Single (native) sector read during recovery */
-static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
+/* Single (native) sector read or write during recovery */
+static void mmc_blk_rw_single(struct mmc_queue *mq, struct request *req)
  {
  	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
  	struct mmc_request *mrq = &mqrq->brq.mrq;
  	struct mmc_card *card = mq->card;
  	struct mmc_host *host = card->host;
  	blk_status_t error = BLK_STS_OK;
-	size_t bytes_per_read = queue_physical_block_size(mq->queue);
+	size_t bytes_per_rw = queue_physical_block_size(mq->queue);
+

  	do {
  		u32 status;
  		int err;
  		int retries = 0;

-		while (retries++ <= MMC_READ_SINGLE_RETRIES) {
+		while (retries++ <= MMC_RW_SINGLE_RETRIES) {
  			mmc_blk_rw_rq_prep(mqrq, card, 1, mq);

  			mmc_wait_for_req(host, mrq);
@@ -1821,13 +1833,13 @@ static void mmc_blk_read_single(struct mmc_queue 
*mq, struct request *req)
  		else
  			error = BLK_STS_OK;

-	} while (blk_update_request(req, error, bytes_per_read));
+	} while (blk_update_request(req, error, bytes_per_rw));

  	return;

  error_exit:
  	mrq->data->bytes_xfered = 0;
-	blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
+	blk_update_request(req, BLK_STS_IOERR, bytes_per_rw);
  	/* Let it try the remaining request again */
  	if (mqrq->retries > MMC_MAX_RETRIES - 1)
  		mqrq->retries = MMC_MAX_RETRIES - 1;
@@ -1969,12 +1981,9 @@ static void mmc_blk_mq_rw_recovery(struct 
mmc_queue *mq, struct request *req)
  		return;
  	}

-	if (rq_data_dir(req) == READ && brq->data.blocks >
-			queue_physical_block_size(mq->queue) >> 9) {
-		/* Read one (native) sector at a time */
-		mmc_blk_read_single(mq, req);
-		return;
-	}
+	/* Perform one (native) sector at a time */
+	if (brq->data.blocks > queue_physical_block_size(mq->queue) >> 9)
+		mmc_blk_rw_single(mq, req);
  }

  static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)


  reply	other threads:[~2026-03-18  1:43 UTC|newest]

Thread overview: 30+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-10 16:04 [PATCH] mmc: block: use single block write in retry Bin Liu
2026-03-16 15:01 ` Ulf Hansson
2026-03-16 16:22   ` Bin Liu
2026-03-24 12:44     ` Ulf Hansson
2026-03-17 12:46 ` Shawn Lin
2026-03-17 13:24   ` Bin Liu
2026-03-18  1:43     ` Shawn Lin [this message]
2026-03-18 14:55       ` Bin Liu
2026-03-18 15:10         ` Bin Liu
2026-03-18 15:17       ` Bin Liu
2026-03-23 13:05         ` Bin Liu
2026-03-24  7:34           ` Shawn Lin
2026-03-24 14:34 ` [PATCH v2 0/2] " Bin Liu
2026-03-24 14:34   ` [PATCH v2 1/2] block: define new rqf_flags RQF_XFER_SINGLE_BLK Bin Liu
2026-03-24 18:48     ` Jens Axboe
2026-03-24 19:11       ` Bin Liu
2026-03-24 19:16         ` Jens Axboe
2026-03-24 20:41           ` Bin Liu
2026-03-24 14:34   ` [PATCH v2 2/2] mmc: block: use single block write in retry Bin Liu
2026-03-24 14:57   ` [PATCH v2 0/2] " Ulf Hansson
2026-03-24 19:17     ` Jens Axboe
2026-03-25 13:49   ` [PATCH v3] " Bin Liu
2026-03-25 14:12     ` Jens Axboe
2026-03-25 14:21       ` Bin Liu
2026-03-25 14:23         ` Jens Axboe
2026-03-25 14:27           ` Bin Liu
2026-03-25 15:22             ` Ulf Hansson
2026-03-25 15:29               ` Bin Liu
2026-03-26 12:33     ` Ulf Hansson
2026-03-26 13:41       ` Bin Liu

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1806373b-d402-5655-94d4-5cf7142e1382@linux.dev \
    --to=shawn.lin@linux.dev \
    --cc=axboe@kernel.dk \
    --cc=b-liu@ti.com \
    --cc=linux-block@vger.kernel.org \
    --cc=linux-mmc@vger.kernel.org \
    --cc=ulf.hansson@linaro.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox