From: Adrian Hunter <adrian.hunter@intel.com>
To: Ulf Hansson <ulf.hansson@linaro.org>
Cc: linux-mmc <linux-mmc@vger.kernel.org>,
Alex Lemberg <alex.lemberg@sandisk.com>,
Mateusz Nowak <mateusz.nowak@intel.com>,
Yuliy Izrailov <Yuliy.Izrailov@sandisk.com>,
Jaehoon Chung <jh80.chung@samsung.com>,
Dong Aisheng <dongas86@gmail.com>,
Das Asutosh <asutoshd@codeaurora.org>,
Zhangfei Gao <zhangfei.gao@gmail.com>,
Dorfman Konstantin <kdorfman@codeaurora.org>,
David Griego <david.griego@linaro.org>,
Sahitya Tummala <stummala@codeaurora.org>,
Harjani Ritesh <riteshh@codeaurora.org>,
Venu Byravarasu <vbyravarasu@nvidia.com>,
Linus Walleij <linus.walleij@linaro.org>
Subject: [PATCH V1 06/18] mmc: block: Factor out data preparation
Date: Mon, 6 Mar 2017 11:11:01 +0200
Message-ID: <1488791473-24981-7-git-send-email-adrian.hunter@intel.com>
In-Reply-To: <1488791473-24981-1-git-send-email-adrian.hunter@intel.com>

Factor out data preparation into a separate function, mmc_blk_data_prep(),
which can be re-used for command queuing.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
---
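A simplified sketch of the resulting split (not part of the patch; bodies
elided, signatures as in the diff below). mmc_blk_data_prep() now holds the
setup that ordinary reads/writes share with the upcoming command queuing
path, and mmc_blk_rw_rq_prep() keeps the legacy-only command setup on top:

    static void mmc_blk_data_prep(struct mmc_queue *mq,
                                  struct mmc_queue_req *mqrq,
                                  int disable_multi, bool *do_rel_wr,
                                  bool *do_data_tag)
    {
            /* shared: brq->data and stop command setup, reliable write,
             * data tag, data timeout, sg mapping, bounce buffer */
    }

    static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
                                   struct mmc_card *card,
                                   int disable_multi,
                                   struct mmc_queue *mq)
    {
            bool do_rel_wr, do_data_tag;

            mmc_blk_data_prep(mq, mqrq, disable_multi,
                              &do_rel_wr, &do_data_tag);

            /* legacy-only: read/write opcode selection, CMD23 (sbc)
             * where supported, and the err_check callback */
    }
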
drivers/mmc/core/block.c | 151 +++++++++++++++++++++++++----------------------
1 file changed, 82 insertions(+), 69 deletions(-)

diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 1dccc21e6912..83321a2e5875 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1426,36 +1426,39 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
return MMC_BLK_SUCCESS;
}
-static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
- struct mmc_card *card,
- int disable_multi,
- struct mmc_queue *mq)
+static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
+ int disable_multi, bool *do_rel_wr,
+ bool *do_data_tag)
{
- u32 readcmd, writecmd;
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_card *card = md->queue.card;
struct mmc_blk_request *brq = &mqrq->brq;
struct request *req = mqrq->req;
- struct mmc_blk_data *md = mq->blkdata;
- bool do_data_tag;
/*
* Reliable writes are used to implement Forced Unit Access and
* are supported only on MMCs.
*/
- bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
- (rq_data_dir(req) == WRITE) &&
- (md->flags & MMC_BLK_REL_WR);
+ *do_rel_wr = (req->cmd_flags & REQ_FUA) &&
+ rq_data_dir(req) == WRITE &&
+ (md->flags & MMC_BLK_REL_WR);
memset(brq, 0, sizeof(struct mmc_blk_request));
- brq->mrq.cmd = &brq->cmd;
+
brq->mrq.data = &brq->data;
- brq->cmd.arg = blk_rq_pos(req);
- if (!mmc_card_blockaddr(card))
- brq->cmd.arg <<= 9;
- brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
- brq->data.blksz = 512;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
+
+ if (rq_data_dir(req) == READ) {
+ brq->data.flags = MMC_DATA_READ;
+ brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ brq->data.flags = MMC_DATA_WRITE;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ }
+
+ brq->data.blksz = 512;
brq->data.blocks = blk_rq_sectors(req);
/*
@@ -1486,6 +1489,68 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
brq->data.blocks);
}
+ if (*do_rel_wr)
+ mmc_apply_rel_rw(brq, card, req);
+
+ /*
+ * Data tag is used only during writing meta data to speed
+ * up write and any subsequent read of this meta data
+ */
+ *do_data_tag = card->ext_csd.data_tag_unit_size &&
+ (req->cmd_flags & REQ_META) &&
+ (rq_data_dir(req) == WRITE) &&
+ ((brq->data.blocks * brq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+
+ mmc_set_data_timeout(&brq->data, card);
+
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+ /*
+ * Adjust the sg list so it is the same size as the
+ * request.
+ */
+ if (brq->data.blocks != blk_rq_sectors(req)) {
+ int i, data_size = brq->data.blocks << 9;
+ struct scatterlist *sg;
+
+ for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+ data_size -= sg->length;
+ if (data_size <= 0) {
+ sg->length += data_size;
+ i++;
+ break;
+ }
+ }
+ brq->data.sg_len = i;
+ }
+
+ mqrq->areq.mrq = &brq->mrq;
+
+ mmc_queue_bounce_pre(mqrq);
+}
+
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ int disable_multi,
+ struct mmc_queue *mq)
+{
+ u32 readcmd, writecmd;
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mqrq->req;
+ struct mmc_blk_data *md = mq->blkdata;
+ bool do_rel_wr, do_data_tag;
+
+ mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
+
+ brq->mrq.cmd = &brq->cmd;
+
+ brq->cmd.arg = blk_rq_pos(req);
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
if (brq->data.blocks > 1 || do_rel_wr) {
/* SPI multiblock writes terminate using a special
* token, not a STOP_TRANSMISSION request.
@@ -1500,32 +1565,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
readcmd = MMC_READ_SINGLE_BLOCK;
writecmd = MMC_WRITE_BLOCK;
}
- if (rq_data_dir(req) == READ) {
- brq->cmd.opcode = readcmd;
- brq->data.flags = MMC_DATA_READ;
- if (brq->mrq.stop)
- brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
- MMC_CMD_AC;
- } else {
- brq->cmd.opcode = writecmd;
- brq->data.flags = MMC_DATA_WRITE;
- if (brq->mrq.stop)
- brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
- MMC_CMD_AC;
- }
-
- if (do_rel_wr)
- mmc_apply_rel_rw(brq, card, req);
-
- /*
- * Data tag is used only during writing meta data to speed
- * up write and any subsequent read of this meta data
- */
- do_data_tag = (card->ext_csd.data_tag_unit_size) &&
- (req->cmd_flags & REQ_META) &&
- (rq_data_dir(req) == WRITE) &&
- ((brq->data.blocks * brq->data.blksz) >=
- card->ext_csd.data_tag_unit_size);
+ brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;
/*
* Pre-defined multi-block transfers are preferable to
@@ -1556,34 +1596,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
brq->mrq.sbc = &brq->sbc;
}
- mmc_set_data_timeout(&brq->data, card);
-
- brq->data.sg = mqrq->sg;
- brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
-
- /*
- * Adjust the sg list so it is the same size as the
- * request.
- */
- if (brq->data.blocks != blk_rq_sectors(req)) {
- int i, data_size = brq->data.blocks << 9;
- struct scatterlist *sg;
-
- for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
- data_size -= sg->length;
- if (data_size <= 0) {
- sg->length += data_size;
- i++;
- break;
- }
- }
- brq->data.sg_len = i;
- }
-
- mqrq->areq.mrq = &brq->mrq;
mqrq->areq.err_check = mmc_blk_err_check;
-
- mmc_queue_bounce_pre(mqrq);
}
static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
--
1.9.1