From: Abhishek Sahu <absahu@codeaurora.org>
To: Andy Gross <andy.gross@linaro.org>, Wolfram Sang <wsa@the-dreams.de>
Cc: David Brown <david.brown@linaro.org>,
Sricharan R <sricharan@codeaurora.org>,
linux-arm-msm@vger.kernel.org, linux-soc@vger.kernel.org,
linux-i2c@vger.kernel.org, linux-kernel@vger.kernel.org,
Abhishek Sahu <absahu@codeaurora.org>
Subject: [PATCH 09/12] i2c: qup: fix buffer overflow for multiple msg of maximum xfer len
Date: Sat, 3 Feb 2018 13:28:14 +0530
Message-ID: <1517644697-30806-10-git-send-email-absahu@codeaurora.org>
In-Reply-To: <1517644697-30806-1-git-send-email-absahu@codeaurora.org>
The BAM mode requires buffers for the start tag data and the TX/RX SG
lists. Currently, these are sized for a single transfer of maximum
length (65K). However, an I2C transfer can contain multiple messages,
and each message can itself be of this maximum length, so a buffer
overflow will occur in that case. Simply increasing the buffer size is
not feasible, since an I2C transfer can contain any number of messages,
so this patch makes the following changes to support transfers with
multiple messages:
1. Calculate the required buffers for two maximum-length messages
(65K * 2).
2. Split descriptor formation from descriptor scheduling.
The idea is to fit as many messages as possible into one DMA transfer,
up to the SG-entry threshold for one maximum-length transfer
(max_xfer_sg_len). Whenever the SG count crosses this threshold, the
BAM transfer is scheduled and the subsequent transfer starts again
from zero.
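For illustration only, here is a minimal, self-contained sketch of the
new batching flow. It is not the driver code: the struct and helper
names (fake_msg, make_desc, schedule_desc) and the constants are
simplified stand-ins, and the SG-entry accounting is rounded to "two
entries per block".

  #include <stdio.h>

  /* Illustrative values: one BAM block and a 65K maximum message. */
  #define BLOCK_LIMIT      256u
  #define MAX_MSG_LEN      65536u
  #define MAX_XFER_SG_LEN  (2u * (MAX_MSG_LEN / BLOCK_LIMIT))

  /* Simplified message: only the length matters for this sketch. */
  struct fake_msg { unsigned int len; };

  /* Stand-in for qup_i2c_bam_make_desc(): only counts SG entries. */
  static void make_desc(const struct fake_msg *msg, unsigned int *sg_cnt)
  {
      unsigned int blocks = (msg->len + BLOCK_LIMIT - 1) / BLOCK_LIMIT;

      *sg_cnt += 2 * blocks;  /* roughly one tag + one data entry per block */
  }

  /* Stand-in for qup_i2c_bam_schedule_desc(): pretends to run the transfer. */
  static void schedule_desc(unsigned int sg_cnt)
  {
      printf("scheduling BAM transfer with %u SG entries\n", sg_cnt);
  }

  int main(void)
  {
      /* Three maximum-length messages: the case that used to overflow. */
      struct fake_msg msgs[] = { { MAX_MSG_LEN }, { MAX_MSG_LEN }, { MAX_MSG_LEN } };
      unsigned int num = sizeof(msgs) / sizeof(msgs[0]);
      unsigned int sg_cnt = 0;

      for (unsigned int idx = 0; idx < num; idx++) {
          make_desc(&msgs[idx], &sg_cnt);

          /*
           * The buffers are sized for two maximum-length messages, so
           * once the SG count crosses the one-message threshold the
           * pending descriptors are scheduled and the counters restart
           * from zero, mirroring the loop added in qup_i2c_bam_xfer().
           */
          if (sg_cnt > MAX_XFER_SG_LEN || idx == num - 1) {
              schedule_desc(sg_cnt);
              sg_cnt = 0;
          }
      }

      return 0;
  }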
Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
---
drivers/i2c/busses/i2c-qup.c | 199 +++++++++++++++++++++++++------------------
1 file changed, 118 insertions(+), 81 deletions(-)
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 6df65ea..ba717bb 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -155,6 +155,7 @@ struct qup_i2c_bam {
struct qup_i2c_tag tag;
struct dma_chan *dma;
struct scatterlist *sg;
+ unsigned int sg_cnt;
};
struct qup_i2c_dev {
@@ -195,6 +196,8 @@ struct qup_i2c_dev {
bool use_dma;
/* The threshold length above which DMA will be used */
unsigned int dma_threshold;
+ unsigned int max_xfer_sg_len;
+ unsigned int tag_buf_pos;
struct dma_pool *dpool;
struct qup_i2c_tag start_tag;
struct qup_i2c_bam brx;
@@ -699,86 +702,86 @@ static int qup_i2c_req_dma(struct qup_i2c_dev *qup)
return 0;
}
-static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg,
- int num)
+static int qup_i2c_bam_make_desc(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int ret = 0, limit = QUP_READ_LIMIT;
+ u32 len = 0, blocks, rem;
+ u32 i = 0, tlen, tx_len = 0;
+ u8 *tags;
+
+ qup_i2c_set_blk_data(qup, msg);
+
+ blocks = qup->blk.count;
+ rem = msg->len - (blocks - 1) * limit;
+
+ if (msg->flags & I2C_M_RD) {
+ while (qup->blk.pos < blocks) {
+ tlen = (i == (blocks - 1)) ? rem : limit;
+ tags = &qup->start_tag.start[qup->tag_buf_pos + len];
+ len += qup_i2c_set_tags(tags, qup, msg);
+ qup->blk.data_len -= tlen;
+
+ /* scratch buf to read the start and len tags */
+ ret = qup_sg_set_buf(&qup->brx.sg[qup->brx.sg_cnt++],
+ &qup->brx.tag.start[0],
+ 2, qup, DMA_FROM_DEVICE);
+
+ if (ret)
+ return ret;
+
+ ret = qup_sg_set_buf(&qup->brx.sg[qup->brx.sg_cnt++],
+ &msg->buf[limit * i],
+ tlen, qup,
+ DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ i++;
+ qup->blk.pos = i;
+ }
+ ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
+ &qup->start_tag.start[qup->tag_buf_pos],
+ len, qup, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ qup->tag_buf_pos += len;
+ } else {
+ while (qup->blk.pos < blocks) {
+ tlen = (i == (blocks - 1)) ? rem : limit;
+ tags = &qup->start_tag.start[qup->tag_buf_pos + tx_len];
+ len = qup_i2c_set_tags(tags, qup, msg);
+ qup->blk.data_len -= tlen;
+
+ ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
+ tags, len,
+ qup, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ tx_len += len;
+ ret = qup_sg_set_buf(&qup->btx.sg[qup->btx.sg_cnt++],
+ &msg->buf[limit * i],
+ tlen, qup, DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+ i++;
+ qup->blk.pos = i;
+ }
+
+ qup->tag_buf_pos += tx_len;
+ }
+
+ return 0;
+}
+
+static int qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup)
{
struct dma_async_tx_descriptor *txd, *rxd = NULL;
- int ret = 0, idx = 0, limit = QUP_READ_LIMIT;
+ int ret = 0;
dma_cookie_t cookie_rx, cookie_tx;
- u32 len, blocks, rem;
- u32 i, tlen, tx_len, tx_buf = 0, rx_buf = 0, off = 0;
- u8 *tags;
-
- while (idx < num) {
- tx_len = 0, len = 0, i = 0;
-
- qup->is_last = (idx == (num - 1));
-
- qup_i2c_set_blk_data(qup, msg);
-
- blocks = qup->blk.count;
- rem = msg->len - (blocks - 1) * limit;
-
- if (msg->flags & I2C_M_RD) {
- while (qup->blk.pos < blocks) {
- tlen = (i == (blocks - 1)) ? rem : limit;
- tags = &qup->start_tag.start[off + len];
- len += qup_i2c_set_tags(tags, qup, msg);
- qup->blk.data_len -= tlen;
-
- /* scratch buf to read the start and len tags */
- ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++],
- &qup->brx.tag.start[0],
- 2, qup, DMA_FROM_DEVICE);
-
- if (ret)
- return ret;
-
- ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++],
- &msg->buf[limit * i],
- tlen, qup,
- DMA_FROM_DEVICE);
- if (ret)
- return ret;
-
- i++;
- qup->blk.pos = i;
- }
- ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++],
- &qup->start_tag.start[off],
- len, qup, DMA_TO_DEVICE);
- if (ret)
- return ret;
-
- off += len;
- } else {
- while (qup->blk.pos < blocks) {
- tlen = (i == (blocks - 1)) ? rem : limit;
- tags = &qup->start_tag.start[off + tx_len];
- len = qup_i2c_set_tags(tags, qup, msg);
- qup->blk.data_len -= tlen;
-
- ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++],
- tags, len,
- qup, DMA_TO_DEVICE);
- if (ret)
- return ret;
-
- tx_len += len;
- ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++],
- &msg->buf[limit * i],
- tlen, qup, DMA_TO_DEVICE);
- if (ret)
- return ret;
- i++;
- qup->blk.pos = i;
- }
- off += tx_len;
-
- }
- idx++;
- msg++;
- }
+ u32 len = 0;
+ u32 tx_buf = qup->btx.sg_cnt, rx_buf = qup->brx.sg_cnt;
/* schedule the EOT and FLUSH I2C tags */
len = 1;
@@ -878,11 +881,19 @@ static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg,
return ret;
}
+static void qup_i2c_bam_clear_tag_buffers(struct qup_i2c_dev *qup)
+{
+ qup->btx.sg_cnt = 0;
+ qup->brx.sg_cnt = 0;
+ qup->tag_buf_pos = 0;
+}
+
static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
int num)
{
struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
int ret = 0;
+ int idx = 0;
enable_irq(qup->irq);
ret = qup_i2c_req_dma(qup);
@@ -905,9 +916,34 @@ static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
goto out;
writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+ qup_i2c_bam_clear_tag_buffers(qup);
+
+ for (idx = 0; idx < num; idx++) {
+ qup->msg = msg + idx;
+ qup->is_last = idx == (num - 1);
+
+ ret = qup_i2c_bam_make_desc(qup, qup->msg);
+ if (ret)
+ break;
+
+ /*
+ * Schedule the BAM transfer if the accumulated SG count has
+ * already crossed the maximum length or if this is the last
+ * message. Since the tag and SG buffers are sized for two
+ * maximum-length transfers, they can never overflow while
+ * batching messages this way.
+ */
+ if (qup->btx.sg_cnt > qup->max_xfer_sg_len ||
+ qup->brx.sg_cnt > qup->max_xfer_sg_len ||
+ qup->is_last) {
+ ret = qup_i2c_bam_schedule_desc(qup);
+ if (ret)
+ break;
+
+ qup_i2c_bam_clear_tag_buffers(qup);
+ }
+ }
- qup->msg = msg;
- ret = qup_i2c_bam_do_xfer(qup, qup->msg, num);
out:
disable_irq(qup->irq);
@@ -1459,7 +1495,8 @@ static int qup_i2c_probe(struct platform_device *pdev)
else if (ret != 0)
goto nodma;
- blocks = (MX_BLOCKS << 1) + 1;
+ qup->max_xfer_sg_len = (MX_BLOCKS << 1);
+ blocks = 2 * qup->max_xfer_sg_len + 1;
qup->btx.sg = devm_kzalloc(&pdev->dev,
sizeof(*qup->btx.sg) * blocks,
GFP_KERNEL);
@@ -1603,7 +1640,7 @@ static int qup_i2c_probe(struct platform_device *pdev)
one_bit_t = (USEC_PER_SEC / clk_freq) + 1;
qup->one_byte_t = one_bit_t * 9;
qup->xfer_timeout = TOUT_MIN * HZ +
- usecs_to_jiffies(MX_TX_RX_LEN * qup->one_byte_t);
+ usecs_to_jiffies(2 * MX_TX_RX_LEN * qup->one_byte_t);
dev_dbg(qup->dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
qup->in_blk_sz, qup->in_fifo_sz,
--
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, hosted by The Linux Foundation