From: Bartosz Golaszewski <brgl@bgdev.pl>
To: Vinod Koul <vkoul@kernel.org>, Jonathan Corbet <corbet@lwn.net>,
Thara Gopinath <thara.gopinath@gmail.com>,
Herbert Xu <herbert@gondor.apana.org.au>,
"David S. Miller" <davem@davemloft.net>,
Udit Tiwari <quic_utiwari@quicinc.com>,
Daniel Perez-Zoghbi <dperezzo@quicinc.com>,
Md Sadre Alam <mdalam@qti.qualcomm.com>,
Dmitry Baryshkov <lumag@kernel.org>
Cc: dmaengine@vger.kernel.org, linux-doc@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-arm-msm@vger.kernel.org,
linux-crypto@vger.kernel.org,
Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
Subject: [PATCH v9 09/11] crypto: qce - Add BAM DMA support for crypto register I/O
Date: Fri, 28 Nov 2025 12:44:07 +0100
Message-ID: <20251128-qcom-qce-cmd-descr-v9-9-9a5f72b89722@linaro.org>
In-Reply-To: <20251128-qcom-qce-cmd-descr-v9-0-9a5f72b89722@linaro.org>
From: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
Implement the infrastructure for performing register I/O over BAM DMA
instead of the CPU: register writes are queued as BAM command elements
and submitted as a single command descriptor. No functional change yet.
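Nothing calls the new helpers yet; for reviewers, a minimal sketch of
the intended call sequence follows (the caller, the register offset and
the value are illustrative only and not part of this patch):

	/* Hypothetical caller - wired up by a later patch in the series. */
	static int example_prog_regs(struct qce_device *qce)
	{
		/* Reset the software state of the command descriptor. */
		qce_clear_bam_transaction(qce);

		/* Queue a register write as a BAM command element. */
		qce_write_dma(qce, REG_CONFIG, 0 /* illustrative value */);

		/* Map the queued elements and submit them to the BAM
		 * as a single command descriptor.
		 */
		return qce_submit_cmd_desc(qce);
	}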
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
---
drivers/crypto/qce/core.h | 4 ++
drivers/crypto/qce/dma.c | 108 +++++++++++++++++++++++++++++++++++++++++++++
drivers/crypto/qce/dma.h | 5 +++
3 files changed, 117 insertions(+)
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
index a80e12eac6c87e5321cce16c56a4bf5003474ef0..d238097f834e4605f3825f23d0316d4196439116 100644
--- a/drivers/crypto/qce/core.h
+++ b/drivers/crypto/qce/core.h
@@ -30,6 +30,8 @@
* @base_dma: base DMA address
* @base_phys: base physical address
* @dma_size: size of memory mapped for DMA
+ * @read_buf: buffer into which register values read over DMA are written
+ * @read_buf_dma: DMA address of the read buffer
* @async_req_enqueue: invoked by every algorithm to enqueue a request
* @async_req_done: invoked by every algorithm to finish its request
*/
@@ -49,6 +51,8 @@ struct qce_device {
dma_addr_t base_dma;
phys_addr_t base_phys;
size_t dma_size;
+ __le32 *read_buf;
+ dma_addr_t read_buf_dma;
int (*async_req_enqueue)(struct qce_device *qce,
struct crypto_async_request *req);
void (*async_req_done)(struct qce_device *qce, int ret);
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index a46264735bb895b6199969e83391383ccbbacc5f..ba7a52fd4c6349d59c075c346f75741defeb6034 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -4,6 +4,8 @@
*/
#include <linux/device.h>
+#include <linux/dma/qcom_bam_dma.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>
@@ -11,6 +13,97 @@
#include "dma.h"
#define QCE_IGNORE_BUF_SZ (2 * QCE_BAM_BURST_SIZE)
+#define QCE_BAM_CMD_SGL_SIZE 128
+#define QCE_BAM_CMD_ELEMENT_SIZE 128
+#define QCE_MAX_REG_READ 8
+
+struct qce_desc_info {
+ struct dma_async_tx_descriptor *dma_desc;
+ enum dma_data_direction dir;
+};
+
+struct qce_bam_transaction {
+ struct bam_cmd_element bam_ce[QCE_BAM_CMD_ELEMENT_SIZE];
+ struct scatterlist wr_sgl[QCE_BAM_CMD_SGL_SIZE];
+ struct qce_desc_info *desc;
+ u32 bam_ce_idx;
+ u32 pre_bam_ce_idx;
+ u32 wr_sgl_cnt;
+};
+
+void qce_clear_bam_transaction(struct qce_device *qce)
+{
+ struct qce_bam_transaction *bam_txn = qce->dma.bam_txn;
+
+	bam_txn->bam_ce_idx = 0;
+	bam_txn->pre_bam_ce_idx = 0;
+	bam_txn->wr_sgl_cnt = 0;
+}
+
+int qce_submit_cmd_desc(struct qce_device *qce)
+{
+ struct qce_desc_info *qce_desc = qce->dma.bam_txn->desc;
+ struct qce_bam_transaction *bam_txn = qce->dma.bam_txn;
+ struct dma_async_tx_descriptor *dma_desc;
+ struct dma_chan *chan = qce->dma.rxchan;
+ unsigned long attrs = DMA_PREP_CMD;
+ dma_cookie_t cookie;
+ unsigned int mapped;
+ int ret;
+
+ mapped = dma_map_sg_attrs(qce->dev, bam_txn->wr_sgl, bam_txn->wr_sgl_cnt,
+ DMA_TO_DEVICE, attrs);
+ if (!mapped)
+ return -ENOMEM;
+
+ dma_desc = dmaengine_prep_slave_sg(chan, bam_txn->wr_sgl, bam_txn->wr_sgl_cnt,
+ DMA_MEM_TO_DEV, attrs);
+ if (!dma_desc) {
+ dma_unmap_sg(qce->dev, bam_txn->wr_sgl, bam_txn->wr_sgl_cnt, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ qce_desc->dma_desc = dma_desc;
+ cookie = dmaengine_submit(qce_desc->dma_desc);
+
+ ret = dma_submit_error(cookie);
+ if (ret)
+ return ret;
+
+ qce_dma_issue_pending(&qce->dma);
+
+ return 0;
+}
+
+static void qce_prep_dma_cmd_desc(struct qce_device *qce, struct qce_dma_data *dma,
+ unsigned int addr, void *buf)
+{
+ struct qce_bam_transaction *bam_txn = dma->bam_txn;
+ struct bam_cmd_element *bam_ce_buf;
+ int bam_ce_size, cnt, idx;
+
+ idx = bam_txn->bam_ce_idx;
+ bam_ce_buf = &bam_txn->bam_ce[idx];
+ bam_prep_ce_le32(bam_ce_buf, addr, BAM_WRITE_COMMAND, *((__le32 *)buf));
+
+ bam_ce_buf = &bam_txn->bam_ce[bam_txn->pre_bam_ce_idx];
+ bam_txn->bam_ce_idx++;
+ bam_ce_size = (bam_txn->bam_ce_idx - bam_txn->pre_bam_ce_idx) * sizeof(*bam_ce_buf);
+
+ cnt = bam_txn->wr_sgl_cnt;
+
+ sg_set_buf(&bam_txn->wr_sgl[cnt], bam_ce_buf, bam_ce_size);
+
+ ++bam_txn->wr_sgl_cnt;
+ bam_txn->pre_bam_ce_idx = bam_txn->bam_ce_idx;
+}
+
+void qce_write_dma(struct qce_device *qce, unsigned int offset, u32 val)
+{
+ unsigned int reg_addr = ((unsigned int)(qce->base_phys) + offset);
+
+ qce_prep_dma_cmd_desc(qce, &qce->dma, reg_addr, &val);
+}
int devm_qce_dma_request(struct qce_device *qce)
{
@@ -31,6 +125,21 @@ int devm_qce_dma_request(struct qce_device *qce)
if (!dma->result_buf)
return -ENOMEM;
+ dma->bam_txn = devm_kzalloc(dev, sizeof(*dma->bam_txn), GFP_KERNEL);
+ if (!dma->bam_txn)
+ return -ENOMEM;
+
+ dma->bam_txn->desc = devm_kzalloc(dev, sizeof(*dma->bam_txn->desc), GFP_KERNEL);
+ if (!dma->bam_txn->desc)
+ return -ENOMEM;
+
+ sg_init_table(dma->bam_txn->wr_sgl, QCE_BAM_CMD_SGL_SIZE);
+
+ qce->read_buf = dmam_alloc_coherent(qce->dev, QCE_MAX_REG_READ * sizeof(*qce->read_buf),
+ &qce->read_buf_dma, GFP_KERNEL);
+ if (!qce->read_buf)
+ return -ENOMEM;
+
return 0;
}
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 483789d9fa98e79d1283de8297bf2fc2a773f3a7..f05dfa9e6b25bd60e32f45079a8bc7e6a4cf81f9 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -8,6 +8,7 @@
#include <linux/dmaengine.h>
+struct qce_bam_transaction;
struct qce_device;
/* maximum data transfer block size between BAM and CE */
@@ -32,6 +33,7 @@ struct qce_dma_data {
struct dma_chan *txchan;
struct dma_chan *rxchan;
struct qce_result_dump *result_buf;
+ struct qce_bam_transaction *bam_txn;
};
int devm_qce_dma_request(struct qce_device *qce);
@@ -43,5 +45,8 @@ int qce_dma_terminate_all(struct qce_dma_data *dma);
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add,
unsigned int max_len);
+void qce_write_dma(struct qce_device *qce, unsigned int offset, u32 val);
+int qce_submit_cmd_desc(struct qce_device *qce);
+void qce_clear_bam_transaction(struct qce_device *qce);
#endif /* _DMA_H_ */
--
2.51.0