From: Md Sadre Alam <mdalam@codeaurora.org>
To: miquel.raynal@bootlin.com, mani@kernel.org,
linux-mtd@lists.infradead.org, linux-kernel@vger.kernel.org
Cc: mdalam@codeaurora.org, sricharan@codeaurora.org
Subject: [PATCH 2/3] mtd: rawnand: qcom: Add sg list to handle status pipe request
Date: Wed, 15 Sep 2021 15:27:30 +0530 [thread overview]
Message-ID: <1631699851-12172-3-git-send-email-mdalam@codeaurora.org> (raw)
In-Reply-To: <1631699851-12172-1-git-send-email-mdalam@codeaurora.org>
From QPIC V2.0 onwards there is a separate pipe to read the status
for each code word while reading in the enhanced modes: page scope
read and multi-page read.
This sg list will be used to handle requests via the status pipe
during page scope and multi-page reads.
Signed-off-by: Md Sadre Alam <mdalam@codeaurora.org>
---
drivers/mtd/nand/raw/qcom_nandc.c | 34 ++++++++++++++++++++++++++++++++++
1 file changed, 34 insertions(+)
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 42c6291..07448c4 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -213,6 +213,7 @@ nandc_set_reg(chip, reg, \
#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL 32
#define QPIC_PER_CW_DATA_SGL 8
+#define QPIC_PER_CW_STS_SGL 8
#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
@@ -258,6 +259,7 @@ struct bam_transaction {
struct bam_cmd_element *bam_ce;
struct scatterlist *cmd_sgl;
struct scatterlist *data_sgl;
+ struct scatterlist *sts_sgl;
u32 bam_ce_pos;
u32 bam_ce_start;
u32 cmd_sgl_pos;
@@ -266,6 +268,8 @@ struct bam_transaction {
u32 tx_sgl_start;
u32 rx_sgl_pos;
u32 rx_sgl_start;
+ u32 sts_sgl_pos;
+ u32 sts_sgl_start;
bool wait_second_completion;
struct completion txn_done;
struct dma_async_tx_descriptor *last_data_desc;
@@ -508,6 +512,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
+ if (nandc->props->qpic_v2)
+ bam_txn_size += (sizeof(*bam_txn->sts_sgl) * QPIC_PER_CW_STS_SGL);
bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
if (!bam_txn_buf)
@@ -526,6 +532,12 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn->data_sgl = bam_txn_buf;
+ if (nandc->props->qpic_v2) {
+ bam_txn_buf +=
+ sizeof(*bam_txn->sts_sgl) * QPIC_PER_CW_STS_SGL * num_cw;
+ bam_txn->sts_sgl = bam_txn_buf;
+ }
+
init_completion(&bam_txn->txn_done);
return bam_txn;
@@ -554,6 +566,12 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
QPIC_PER_CW_CMD_SGL);
sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
QPIC_PER_CW_DATA_SGL);
+ if (nandc->props->qpic_v2) {
+ bam_txn->sts_sgl_pos = 0;
+ bam_txn->sts_sgl_start = 0;
+ sg_init_table(bam_txn->sts_sgl, nandc->max_cwperpage *
+ QPIC_PER_CW_STS_SGL);
+ }
reinit_completion(&bam_txn->txn_done);
}
@@ -808,6 +826,12 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
dir_eng = DMA_MEM_TO_DEV;
desc->dir = DMA_TO_DEVICE;
+ } else if (nandc->props->qpic_v2 && chan == nandc->sts_chan) {
+ sgl = &bam_txn->sts_sgl[bam_txn->sts_sgl_start];
+ sgl_cnt = bam_txn->sts_sgl_pos - bam_txn->sts_sgl_start;
+ bam_txn->sts_sgl_start = bam_txn->sts_sgl_pos;
+ dir_eng = DMA_DEV_TO_MEM;
+ desc->dir = DMA_FROM_DEVICE;
} else {
sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
@@ -1394,6 +1418,14 @@ static int submit_descs(struct qcom_nand_controller *nandc)
if (r)
return r;
}
+
+ if (nandc->props->qpic_v2) {
+ if (bam_txn->sts_sgl_pos > bam_txn->sts_sgl_start) {
+ r = prepare_bam_async_desc(nandc, nandc->sts_chan, 0);
+ if (r)
+ return r;
+ }
+ }
}
list_for_each_entry(desc, &nandc->desc_list, node)
@@ -1411,6 +1443,8 @@ static int submit_descs(struct qcom_nand_controller *nandc)
dma_async_issue_pending(nandc->tx_chan);
dma_async_issue_pending(nandc->rx_chan);
dma_async_issue_pending(nandc->cmd_chan);
+ if (nandc->props->qpic_v2)
+ dma_async_issue_pending(nandc->sts_chan);
if (!wait_for_completion_timeout(&bam_txn->txn_done,
QPIC_NAND_COMPLETION_TIMEOUT))
--
2.7.4
next prev parent reply other threads:[~2021-09-15 9:58 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-09-15 9:57 [PATCH 0/3] Add support for page scope read Md Sadre Alam
2021-09-15 9:57 ` [PATCH 1/3] mtd: rawnand: qcom: Add support for status pipe Md Sadre Alam
2021-09-28 12:17 ` mdalam
2021-09-28 12:46 ` Miquel Raynal
2021-09-28 15:52 ` Manivannan Sadhasivam
2021-09-15 9:57 ` Md Sadre Alam [this message]
2021-09-28 12:20 ` [PATCH 2/3] mtd: rawnand: qcom: Add sg list to handle status pipe request mdalam
2021-09-28 12:48 ` Miquel Raynal
2021-09-28 16:03 ` Manivannan Sadhasivam
2021-09-15 9:57 ` [PATCH 3/3] mtd: rawnand: qcom: Add support for page scope read Md Sadre Alam
2021-09-28 12:21 ` mdalam
2021-09-28 12:54 ` Miquel Raynal
2021-09-28 16:12 ` Manivannan Sadhasivam
2021-09-28 15:17 ` [PATCH 0/3] " Manivannan Sadhasivam
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1631699851-12172-3-git-send-email-mdalam@codeaurora.org \
--to=mdalam@codeaurora.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mtd@lists.infradead.org \
--cc=mani@kernel.org \
--cc=miquel.raynal@bootlin.com \
--cc=sricharan@codeaurora.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox