From: Harsh Jain <h.jain@amd.com>
To: <herbert@gondor.apana.org.au>, <davem@davemloft.net>,
<linux-crypto@vger.kernel.org>, <mounika.botcha@amd.com>,
<sarat.chand.savitala@amd.com>, <michal.simek@amd.com>
Cc: Harsh Jain <h.jain@amd.com>
Subject: [PATCH 2/6] crypto: zynqmp-sha: Change algo type from shash to ahash
Date: Tue, 3 Mar 2026 12:49:49 +0530 [thread overview]
Message-ID: <20260303071953.149252-3-h.jain@amd.com> (raw)
In-Reply-To: <20260303071953.149252-1-h.jain@amd.com>
The descsize field in the shash type has a size limit; a driver whose
fallback state does not fit within that limit cannot use the shash type.
Change the algo type from shash to ahash.
Also add crypto engine support to serialize the requests.
Signed-off-by: Harsh Jain <h.jain@amd.com>
---
drivers/crypto/xilinx/zynqmp-sha.c | 258 +++++++++++++++++++----------
1 file changed, 171 insertions(+), 87 deletions(-)
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 1d6b7f971111..cd951e692dd9 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -2,8 +2,10 @@
/*
* Xilinx ZynqMP SHA Driver.
* Copyright (c) 2022 Xilinx Inc.
+ * Copyright (C) 2022-2026, Advanced Micro Devices, Inc.
*/
#include <crypto/internal/hash.h>
+#include <crypto/engine.h>
#include <crypto/sha3.h>
#include <linux/cacheflush.h>
#include <linux/cleanup.h>
@@ -14,7 +16,6 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/spinlock.h>
#include <linux/platform_device.h>
#define ZYNQMP_DMA_BIT_MASK 32U
@@ -27,164 +28,230 @@ enum zynqmp_sha_op {
};
struct zynqmp_sha_drv_ctx {
- struct shash_alg sha3_384;
+ struct ahash_engine_alg sha3_384;
+ struct crypto_engine *engine;
struct device *dev;
};
struct zynqmp_sha_tfm_ctx {
struct device *dev;
- struct crypto_shash *fbk_tfm;
+ struct crypto_ahash *fbk_tfm;
+};
+
+struct zynqmp_sha_desc_ctx {
+ struct ahash_request fallback_req;
};
static dma_addr_t update_dma_addr, final_dma_addr;
static char *ubuf, *fbuf;
-static DEFINE_SPINLOCK(zynqmp_sha_lock);
-
-static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
+static int zynqmp_sha_init_tfm(struct crypto_tfm *tfm)
{
- const char *fallback_driver_name = crypto_shash_alg_name(hash);
- struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
- struct shash_alg *alg = crypto_shash_alg(hash);
- struct crypto_shash *fallback_tfm;
+ const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
+ struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
+ struct hash_alg_common *alg = crypto_hash_alg_common(__crypto_ahash_cast(tfm));
+ struct crypto_ahash *fallback_tfm;
struct zynqmp_sha_drv_ctx *drv_ctx;
- drv_ctx = container_of(alg, struct zynqmp_sha_drv_ctx, sha3_384);
+ drv_ctx = container_of(alg, struct zynqmp_sha_drv_ctx, sha3_384.base.halg);
tfm_ctx->dev = drv_ctx->dev;
/* Allocate a fallback and abort if it failed. */
- fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+ fallback_tfm = crypto_alloc_ahash(fallback_driver_name, CRYPTO_ALG_TYPE_SHASH,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm))
return PTR_ERR(fallback_tfm);
- if (crypto_shash_descsize(hash) <
- crypto_shash_statesize(tfm_ctx->fbk_tfm)) {
- crypto_free_shash(fallback_tfm);
- return -EINVAL;
- }
-
tfm_ctx->fbk_tfm = fallback_tfm;
+ crypto_ahash_set_statesize(__crypto_ahash_cast(tfm),
+ crypto_ahash_statesize(fallback_tfm));
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ crypto_ahash_reqsize(tfm_ctx->fbk_tfm) +
+ sizeof(struct zynqmp_sha_desc_ctx));
return 0;
}
-static void zynqmp_sha_exit_tfm(struct crypto_shash *hash)
+static void zynqmp_sha_exit_tfm(struct crypto_tfm *tfm)
{
- struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
+ struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
- crypto_free_shash(tfm_ctx->fbk_tfm);
+ if (tfm_ctx->fbk_tfm) {
+ crypto_free_ahash(tfm_ctx->fbk_tfm);
+ tfm_ctx->fbk_tfm = NULL;
+ }
+
+ memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx));
}
-static int zynqmp_sha_continue(struct shash_desc *desc,
- struct shash_desc *fbdesc, int err)
+static int zynqmp_sha_init(struct ahash_request *req)
{
- err = err ?: crypto_shash_export(fbdesc, shash_desc_ctx(desc));
- shash_desc_zero(fbdesc);
- return err;
+ struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
+ dctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ return crypto_ahash_init(&dctx->fallback_req);
}
-static int zynqmp_sha_init(struct shash_desc *desc)
+static int zynqmp_sha_update(struct ahash_request *req)
{
- struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct crypto_shash *fbtfm = tctx->fbk_tfm;
- SHASH_DESC_ON_STACK(fbdesc, fbtfm);
- int err;
+ struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
+ dctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ dctx->fallback_req.nbytes = req->nbytes;
+ dctx->fallback_req.src = req->src;
+ return crypto_ahash_update(&dctx->fallback_req);
+}
- fbdesc->tfm = fbtfm;
- err = crypto_shash_init(fbdesc);
- return zynqmp_sha_continue(desc, fbdesc, err);
+static int zynqmp_sha_final(struct ahash_request *req)
+{
+ struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
+ dctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ dctx->fallback_req.result = req->result;
+ return crypto_ahash_final(&dctx->fallback_req);
}
-static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length)
+static int zynqmp_sha_finup(struct ahash_request *req)
{
- struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct crypto_shash *fbtfm = tctx->fbk_tfm;
- SHASH_DESC_ON_STACK(fbdesc, fbtfm);
- int err;
+ struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
+ dctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
- fbdesc->tfm = fbtfm;
- err = crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
- crypto_shash_update(fbdesc, data, length);
- return zynqmp_sha_continue(desc, fbdesc, err);
+ dctx->fallback_req.nbytes = req->nbytes;
+ dctx->fallback_req.src = req->src;
+ dctx->fallback_req.result = req->result;
+
+ return crypto_ahash_finup(&dctx->fallback_req);
}
-static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
+static int zynqmp_sha_import(struct ahash_request *req, const void *in)
{
- struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct crypto_shash *fbtfm = tctx->fbk_tfm;
- SHASH_DESC_ON_STACK(fbdesc, fbtfm);
+ struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
+ dctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ return crypto_ahash_import(&dctx->fallback_req, in);
+}
- fbdesc->tfm = fbtfm;
- return crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
- crypto_shash_finup(fbdesc, data, length, out);
+static int zynqmp_sha_export(struct ahash_request *req, void *out)
+{
+ struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
+ dctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ return crypto_ahash_export(&dctx->fallback_req, out);
}
-static int __zynqmp_sha_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
+static int sha_digest(struct ahash_request *req)
{
- unsigned int remaining_len = len;
+ struct crypto_tfm *tfm = crypto_ahash_tfm(crypto_ahash_reqtfm(req));
+ struct hash_alg_common *alg = crypto_hash_alg_common(__crypto_ahash_cast(tfm));
+ struct zynqmp_sha_drv_ctx *drv_ctx;
+
+ drv_ctx = container_of(alg, struct zynqmp_sha_drv_ctx, sha3_384.base.halg);
+ return crypto_transfer_hash_request_to_engine(drv_ctx->engine, req);
+}
+
+static int zynqmp_sha_digest(struct ahash_request *req)
+{
+ unsigned int processed = 0;
+ unsigned int remaining_len;
int update_size;
int ret;
+ remaining_len = req->nbytes;
ret = zynqmp_pm_sha_hash(0, 0, ZYNQMP_SHA3_INIT);
if (ret)
return ret;
- while (remaining_len != 0) {
- memzero_explicit(ubuf, ZYNQMP_DMA_ALLOC_FIXED_SIZE);
- if (remaining_len >= ZYNQMP_DMA_ALLOC_FIXED_SIZE) {
+ while (remaining_len) {
+ if (remaining_len >= ZYNQMP_DMA_ALLOC_FIXED_SIZE)
update_size = ZYNQMP_DMA_ALLOC_FIXED_SIZE;
- remaining_len -= ZYNQMP_DMA_ALLOC_FIXED_SIZE;
- } else {
+ else
update_size = remaining_len;
- remaining_len = 0;
- }
- memcpy(ubuf, data, update_size);
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), ubuf, update_size, processed);
flush_icache_range((unsigned long)ubuf, (unsigned long)ubuf + update_size);
ret = zynqmp_pm_sha_hash(update_dma_addr, update_size, ZYNQMP_SHA3_UPDATE);
if (ret)
return ret;
- data += update_size;
+ remaining_len -= update_size;
+ processed += update_size;
}
ret = zynqmp_pm_sha_hash(final_dma_addr, SHA3_384_DIGEST_SIZE, ZYNQMP_SHA3_FINAL);
- memcpy(out, fbuf, SHA3_384_DIGEST_SIZE);
+ memcpy(req->result, fbuf, SHA3_384_DIGEST_SIZE);
memzero_explicit(fbuf, SHA3_384_DIGEST_SIZE);
return ret;
}
-static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+static int handle_zynqmp_sha_engine_req(struct crypto_engine *engine, void *req)
{
- scoped_guard(spinlock_bh, &zynqmp_sha_lock)
- return __zynqmp_sha_digest(desc, data, len, out);
+ int err;
+
+ err = zynqmp_sha_digest(req);
+ local_bh_disable();
+ crypto_finalize_hash_request(engine, req, err);
+ local_bh_enable();
+
+ return 0;
}
static struct zynqmp_sha_drv_ctx zynqmp_sha3_drv_ctx = {
- .sha3_384 = {
+ .sha3_384.base = {
.init = zynqmp_sha_init,
.update = zynqmp_sha_update,
+ .final = zynqmp_sha_final,
.finup = zynqmp_sha_finup,
- .digest = zynqmp_sha_digest,
- .init_tfm = zynqmp_sha_init_tfm,
- .exit_tfm = zynqmp_sha_exit_tfm,
- .descsize = SHA3_384_EXPORT_SIZE,
- .digestsize = SHA3_384_DIGEST_SIZE,
- .base = {
- .cra_name = "sha3-384",
- .cra_driver_name = "zynqmp-sha3-384",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA3_384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
- .cra_module = THIS_MODULE,
+ .digest = sha_digest,
+ .export = zynqmp_sha_export,
+ .import = zynqmp_sha_import,
+ .halg = {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .statesize = sizeof(struct sha3_state),
+ .base.cra_init = zynqmp_sha_init_tfm,
+ .base.cra_exit = zynqmp_sha_exit_tfm,
+ .base.cra_name = "sha3-384",
+ .base.cra_driver_name = "zynqmp-sha3-384",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
}
- }
+ },
+ .sha3_384.op = {
+ .do_one_request = handle_zynqmp_sha_engine_req,
+ },
};
+
+
static struct xlnx_feature sha_feature_map[] = {
{
.family = PM_ZYNQMP_FAMILY_CODE,
@@ -227,14 +294,30 @@ static int zynqmp_sha_probe(struct platform_device *pdev)
goto err_mem;
}
- err = crypto_register_shash(&sha3_drv_ctx->sha3_384);
+ sha3_drv_ctx->engine = crypto_engine_alloc_init(dev, 1);
+ if (!sha3_drv_ctx->engine) {
+ dev_err(dev, "Cannot alloc Crypto engine\n");
+ err = -ENOMEM;
+ goto err_engine;
+ }
+
+ err = crypto_engine_start(sha3_drv_ctx->engine);
+ if (err) {
+ dev_err(dev, "Cannot start AES engine\n");
+ goto err_start;
+ }
+
+ err = crypto_engine_register_ahash(&sha3_drv_ctx->sha3_384);
if (err < 0) {
- dev_err(dev, "Failed to register shash alg.\n");
- goto err_mem1;
+ dev_err(dev, "Failed to register sha3 alg.\n");
+ goto err_start;
}
+
return 0;
-err_mem1:
+err_start:
+ crypto_engine_exit(sha3_drv_ctx->engine);
+err_engine:
dma_free_coherent(dev, SHA3_384_DIGEST_SIZE, fbuf, final_dma_addr);
err_mem:
@@ -248,9 +331,10 @@ static void zynqmp_sha_remove(struct platform_device *pdev)
struct zynqmp_sha_drv_ctx *sha3_drv_ctx;
sha3_drv_ctx = platform_get_drvdata(pdev);
+ crypto_engine_unregister_ahash(&sha3_drv_ctx->sha3_384);
+ crypto_engine_exit(sha3_drv_ctx->engine);
dma_free_coherent(sha3_drv_ctx->dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
dma_free_coherent(sha3_drv_ctx->dev, SHA3_384_DIGEST_SIZE, fbuf, final_dma_addr);
- crypto_unregister_shash(&sha3_drv_ctx->sha3_384);
}
static struct platform_driver zynqmp_sha_driver = {
--
2.34.1
next prev parent reply other threads:[~2026-03-03 7:20 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-03 7:19 [PATCH 0/6] crypto: zynqmp-aes-gcm: Bug fixes and sha3-384 support Harsh Jain
2026-03-03 7:19 ` [PATCH 1/6] crypto: xilinx: zynamp-sha: Update the driver to make it as self discoverable Harsh Jain
2026-03-03 7:19 ` Harsh Jain [this message]
2026-03-14 4:43 ` [PATCH 2/6] crypto: zynqmp-sha: Change algo type from shash to ahash Herbert Xu
2026-03-14 4:46 ` Herbert Xu
2026-03-03 7:19 ` [PATCH 3/6] crypto: zynqmp-sha: Replace zynqmp prefix with xilinx Harsh Jain
2026-03-03 7:19 ` [PATCH 4/6] firmware: xilinx: Add firmware API's to support sha3-384 in Versal device Harsh Jain
2026-03-03 7:19 ` [PATCH 5/6] crypto: zynqmp-sha: Save dma bit mask value in driver context Harsh Jain
2026-03-03 7:19 ` [PATCH 6/6] crypto: zynqmp-sha: Add sha3-384 support for AMD/Xilinx Versal device Harsh Jain
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260303071953.149252-3-h.jain@amd.com \
--to=h.jain@amd.com \
--cc=davem@davemloft.net \
--cc=herbert@gondor.apana.org.au \
--cc=linux-crypto@vger.kernel.org \
--cc=michal.simek@amd.com \
--cc=mounika.botcha@amd.com \
--cc=sarat.chand.savitala@amd.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox