* [PATCH 1/6] crypto: xilinx: zynqmp-sha: Update the driver to make it self-discoverable
2026-03-03 7:19 [PATCH 0/6] crypto: zynqmp-aes-gcm: Bug fixes and sha3-384 support Harsh Jain
@ 2026-03-03 7:19 ` Harsh Jain
2026-03-03 7:19 ` [PATCH 2/6] crypto: zynqmp-sha: Change algo type from shash to ahash Harsh Jain
` (4 subsequent siblings)
5 siblings, 0 replies; 9+ messages in thread
From: Harsh Jain @ 2026-03-03 7:19 UTC (permalink / raw)
To: herbert, davem, linux-crypto, mounika.botcha,
sarat.chand.savitala, michal.simek
Cc: Harsh Jain
From: Mounika Botcha <mounika.botcha@amd.com>
Update the driver so that it self-discovers the device.
Signed-off-by: Mounika Botcha <mounika.botcha@amd.com>
Signed-off-by: Harsh Jain <h.jain@amd.com>
---
drivers/crypto/xilinx/zynqmp-sha.c | 85 +++++++++++++++++++++---------
1 file changed, 61 insertions(+), 24 deletions(-)
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 5813017b6b79..1d6b7f971111 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -162,7 +162,7 @@ static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned i
return __zynqmp_sha_digest(desc, data, len, out);
}
-static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
+static struct zynqmp_sha_drv_ctx zynqmp_sha3_drv_ctx = {
.sha3_384 = {
.init = zynqmp_sha_init,
.update = zynqmp_sha_update,
@@ -185,17 +185,26 @@ static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
}
};
+static struct xlnx_feature sha_feature_map[] = {
+ {
+ .family = PM_ZYNQMP_FAMILY_CODE,
+ .feature_id = PM_SECURE_SHA,
+ .data = &zynqmp_sha3_drv_ctx,
+ },
+};
+
static int zynqmp_sha_probe(struct platform_device *pdev)
{
+ struct zynqmp_sha_drv_ctx *sha3_drv_ctx;
struct device *dev = &pdev->dev;
int err;
- u32 v;
/* Verify the hardware is present */
- err = zynqmp_pm_get_api_version(&v);
- if (err)
- return err;
-
+ sha3_drv_ctx = xlnx_get_crypto_dev_data(sha_feature_map);
+ if (IS_ERR(sha3_drv_ctx)) {
+ dev_err(dev, "SHA is not supported on the platform\n");
+ return PTR_ERR(sha3_drv_ctx);
+ }
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
if (err < 0) {
@@ -203,19 +212,13 @@ static int zynqmp_sha_probe(struct platform_device *pdev)
return err;
}
- err = crypto_register_shash(&sha3_drv_ctx.sha3_384);
- if (err < 0) {
- dev_err(dev, "Failed to register shash alg.\n");
- return err;
- }
-
- sha3_drv_ctx.dev = dev;
- platform_set_drvdata(pdev, &sha3_drv_ctx);
+ sha3_drv_ctx->dev = dev;
+ platform_set_drvdata(pdev, sha3_drv_ctx);
ubuf = dma_alloc_coherent(dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, &update_dma_addr, GFP_KERNEL);
if (!ubuf) {
err = -ENOMEM;
- goto err_shash;
+ return err;
}
fbuf = dma_alloc_coherent(dev, SHA3_384_DIGEST_SIZE, &final_dma_addr, GFP_KERNEL);
@@ -224,24 +227,30 @@ static int zynqmp_sha_probe(struct platform_device *pdev)
goto err_mem;
}
+ err = crypto_register_shash(&sha3_drv_ctx->sha3_384);
+ if (err < 0) {
+ dev_err(dev, "Failed to register shash alg.\n");
+ goto err_mem1;
+ }
return 0;
-err_mem:
- dma_free_coherent(sha3_drv_ctx.dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
+err_mem1:
+ dma_free_coherent(dev, SHA3_384_DIGEST_SIZE, fbuf, final_dma_addr);
-err_shash:
- crypto_unregister_shash(&sha3_drv_ctx.sha3_384);
+err_mem:
+ dma_free_coherent(dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
return err;
}
static void zynqmp_sha_remove(struct platform_device *pdev)
{
- sha3_drv_ctx.dev = platform_get_drvdata(pdev);
+ struct zynqmp_sha_drv_ctx *sha3_drv_ctx;
- dma_free_coherent(sha3_drv_ctx.dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
- dma_free_coherent(sha3_drv_ctx.dev, SHA3_384_DIGEST_SIZE, fbuf, final_dma_addr);
- crypto_unregister_shash(&sha3_drv_ctx.sha3_384);
+ sha3_drv_ctx = platform_get_drvdata(pdev);
+ dma_free_coherent(sha3_drv_ctx->dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
+ dma_free_coherent(sha3_drv_ctx->dev, SHA3_384_DIGEST_SIZE, fbuf, final_dma_addr);
+ crypto_unregister_shash(&sha3_drv_ctx->sha3_384);
}
static struct platform_driver zynqmp_sha_driver = {
@@ -252,7 +261,35 @@ static struct platform_driver zynqmp_sha_driver = {
},
};
-module_platform_driver(zynqmp_sha_driver);
+static struct platform_device *platform_dev;
+
+static int __init sha_driver_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&zynqmp_sha_driver);
+ if (ret)
+ return ret;
+
+ platform_dev = platform_device_register_simple(zynqmp_sha_driver.driver.name,
+ 0, NULL, 0);
+ if (IS_ERR(platform_dev)) {
+ ret = PTR_ERR(platform_dev);
+ platform_driver_unregister(&zynqmp_sha_driver);
+ }
+
+ return ret;
+}
+
+static void __exit sha_driver_exit(void)
+{
+ platform_device_unregister(platform_dev);
+ platform_driver_unregister(&zynqmp_sha_driver);
+}
+
+module_init(sha_driver_init);
+module_exit(sha_driver_exit);
+
MODULE_DESCRIPTION("ZynqMP SHA3 hardware acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Harsha <harsha.harsha@xilinx.com>");
--
2.34.1
^ permalink raw reply related [flat|nested] 9+ messages in thread* [PATCH 2/6] crypto: zynqmp-sha: Change algo type from shash to ahash
2026-03-03 7:19 [PATCH 0/6] crypto: zynqmp-aes-gcm: Bug fixes and sha3-384 support Harsh Jain
2026-03-03 7:19 ` [PATCH 1/6] crypto: xilinx: zynqmp-sha: Update the driver to make it self-discoverable Harsh Jain
@ 2026-03-03 7:19 ` Harsh Jain
2026-03-14 4:43 ` Herbert Xu
2026-03-14 4:46 ` Herbert Xu
2026-03-03 7:19 ` [PATCH 3/6] crypto: zynqmp-sha: Replace zynqmp prefix with xilinx Harsh Jain
` (3 subsequent siblings)
5 siblings, 2 replies; 9+ messages in thread
From: Harsh Jain @ 2026-03-03 7:19 UTC (permalink / raw)
To: herbert, davem, linux-crypto, mounika.botcha,
sarat.chand.savitala, michal.simek
Cc: Harsh Jain
The descsize field of the shash type has a size limit; for this driver,
the fallback state does not fit within it. Change the algorithm type
from shash to ahash.
Also add crypto engine support to serialize the requests.
Signed-off-by: Harsh Jain <h.jain@amd.com>
---
drivers/crypto/xilinx/zynqmp-sha.c | 258 +++++++++++++++++++----------
1 file changed, 171 insertions(+), 87 deletions(-)
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 1d6b7f971111..cd951e692dd9 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -2,8 +2,10 @@
/*
* Xilinx ZynqMP SHA Driver.
* Copyright (c) 2022 Xilinx Inc.
+ * Copyright (C) 2022-2026, Advanced Micro Devices, Inc.
*/
#include <crypto/internal/hash.h>
+#include <crypto/engine.h>
#include <crypto/sha3.h>
#include <linux/cacheflush.h>
#include <linux/cleanup.h>
@@ -14,7 +16,6 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/spinlock.h>
#include <linux/platform_device.h>
#define ZYNQMP_DMA_BIT_MASK 32U
@@ -27,164 +28,230 @@ enum zynqmp_sha_op {
};
struct zynqmp_sha_drv_ctx {
- struct shash_alg sha3_384;
+ struct ahash_engine_alg sha3_384;
+ struct crypto_engine *engine;
struct device *dev;
};
struct zynqmp_sha_tfm_ctx {
struct device *dev;
- struct crypto_shash *fbk_tfm;
+ struct crypto_ahash *fbk_tfm;
+};
+
+struct zynqmp_sha_desc_ctx {
+ struct ahash_request fallback_req;
};
static dma_addr_t update_dma_addr, final_dma_addr;
static char *ubuf, *fbuf;
-static DEFINE_SPINLOCK(zynqmp_sha_lock);
-
-static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
+static int zynqmp_sha_init_tfm(struct crypto_tfm *tfm)
{
- const char *fallback_driver_name = crypto_shash_alg_name(hash);
- struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
- struct shash_alg *alg = crypto_shash_alg(hash);
- struct crypto_shash *fallback_tfm;
+ const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
+ struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
+ struct hash_alg_common *alg = crypto_hash_alg_common(__crypto_ahash_cast(tfm));
+ struct crypto_ahash *fallback_tfm;
struct zynqmp_sha_drv_ctx *drv_ctx;
- drv_ctx = container_of(alg, struct zynqmp_sha_drv_ctx, sha3_384);
+ drv_ctx = container_of(alg, struct zynqmp_sha_drv_ctx, sha3_384.base.halg);
tfm_ctx->dev = drv_ctx->dev;
/* Allocate a fallback and abort if it failed. */
- fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+ fallback_tfm = crypto_alloc_ahash(fallback_driver_name, CRYPTO_ALG_TYPE_SHASH,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm))
return PTR_ERR(fallback_tfm);
- if (crypto_shash_descsize(hash) <
- crypto_shash_statesize(tfm_ctx->fbk_tfm)) {
- crypto_free_shash(fallback_tfm);
- return -EINVAL;
- }
-
tfm_ctx->fbk_tfm = fallback_tfm;
+ crypto_ahash_set_statesize(__crypto_ahash_cast(tfm),
+ crypto_ahash_statesize(fallback_tfm));
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ crypto_ahash_reqsize(tfm_ctx->fbk_tfm) +
+ sizeof(struct zynqmp_sha_desc_ctx));
return 0;
}
-static void zynqmp_sha_exit_tfm(struct crypto_shash *hash)
+static void zynqmp_sha_exit_tfm(struct crypto_tfm *tfm)
{
- struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
+ struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
- crypto_free_shash(tfm_ctx->fbk_tfm);
+ if (tfm_ctx->fbk_tfm) {
+ crypto_free_ahash(tfm_ctx->fbk_tfm);
+ tfm_ctx->fbk_tfm = NULL;
+ }
+
+ memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx));
}
-static int zynqmp_sha_continue(struct shash_desc *desc,
- struct shash_desc *fbdesc, int err)
+static int zynqmp_sha_init(struct ahash_request *req)
{
- err = err ?: crypto_shash_export(fbdesc, shash_desc_ctx(desc));
- shash_desc_zero(fbdesc);
- return err;
+ struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
+ dctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ return crypto_ahash_init(&dctx->fallback_req);
}
-static int zynqmp_sha_init(struct shash_desc *desc)
+static int zynqmp_sha_update(struct ahash_request *req)
{
- struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct crypto_shash *fbtfm = tctx->fbk_tfm;
- SHASH_DESC_ON_STACK(fbdesc, fbtfm);
- int err;
+ struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
+ dctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ dctx->fallback_req.nbytes = req->nbytes;
+ dctx->fallback_req.src = req->src;
+ return crypto_ahash_update(&dctx->fallback_req);
+}
- fbdesc->tfm = fbtfm;
- err = crypto_shash_init(fbdesc);
- return zynqmp_sha_continue(desc, fbdesc, err);
+static int zynqmp_sha_final(struct ahash_request *req)
+{
+ struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
+ dctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ dctx->fallback_req.result = req->result;
+ return crypto_ahash_final(&dctx->fallback_req);
}
-static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length)
+static int zynqmp_sha_finup(struct ahash_request *req)
{
- struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct crypto_shash *fbtfm = tctx->fbk_tfm;
- SHASH_DESC_ON_STACK(fbdesc, fbtfm);
- int err;
+ struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
+ dctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
- fbdesc->tfm = fbtfm;
- err = crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
- crypto_shash_update(fbdesc, data, length);
- return zynqmp_sha_continue(desc, fbdesc, err);
+ dctx->fallback_req.nbytes = req->nbytes;
+ dctx->fallback_req.src = req->src;
+ dctx->fallback_req.result = req->result;
+
+ return crypto_ahash_finup(&dctx->fallback_req);
}
-static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
+static int zynqmp_sha_import(struct ahash_request *req, const void *in)
{
- struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct crypto_shash *fbtfm = tctx->fbk_tfm;
- SHASH_DESC_ON_STACK(fbdesc, fbtfm);
+ struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
+ dctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ return crypto_ahash_import(&dctx->fallback_req, in);
+}
- fbdesc->tfm = fbtfm;
- return crypto_shash_import(fbdesc, shash_desc_ctx(desc)) ?:
- crypto_shash_finup(fbdesc, data, length, out);
+static int zynqmp_sha_export(struct ahash_request *req, void *out)
+{
+ struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
+ dctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ return crypto_ahash_export(&dctx->fallback_req, out);
}
-static int __zynqmp_sha_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
+static int sha_digest(struct ahash_request *req)
{
- unsigned int remaining_len = len;
+ struct crypto_tfm *tfm = crypto_ahash_tfm(crypto_ahash_reqtfm(req));
+ struct hash_alg_common *alg = crypto_hash_alg_common(__crypto_ahash_cast(tfm));
+ struct zynqmp_sha_drv_ctx *drv_ctx;
+
+ drv_ctx = container_of(alg, struct zynqmp_sha_drv_ctx, sha3_384.base.halg);
+ return crypto_transfer_hash_request_to_engine(drv_ctx->engine, req);
+}
+
+static int zynqmp_sha_digest(struct ahash_request *req)
+{
+ unsigned int processed = 0;
+ unsigned int remaining_len;
int update_size;
int ret;
+ remaining_len = req->nbytes;
ret = zynqmp_pm_sha_hash(0, 0, ZYNQMP_SHA3_INIT);
if (ret)
return ret;
- while (remaining_len != 0) {
- memzero_explicit(ubuf, ZYNQMP_DMA_ALLOC_FIXED_SIZE);
- if (remaining_len >= ZYNQMP_DMA_ALLOC_FIXED_SIZE) {
+ while (remaining_len) {
+ if (remaining_len >= ZYNQMP_DMA_ALLOC_FIXED_SIZE)
update_size = ZYNQMP_DMA_ALLOC_FIXED_SIZE;
- remaining_len -= ZYNQMP_DMA_ALLOC_FIXED_SIZE;
- } else {
+ else
update_size = remaining_len;
- remaining_len = 0;
- }
- memcpy(ubuf, data, update_size);
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), ubuf, update_size, processed);
flush_icache_range((unsigned long)ubuf, (unsigned long)ubuf + update_size);
ret = zynqmp_pm_sha_hash(update_dma_addr, update_size, ZYNQMP_SHA3_UPDATE);
if (ret)
return ret;
- data += update_size;
+ remaining_len -= update_size;
+ processed += update_size;
}
ret = zynqmp_pm_sha_hash(final_dma_addr, SHA3_384_DIGEST_SIZE, ZYNQMP_SHA3_FINAL);
- memcpy(out, fbuf, SHA3_384_DIGEST_SIZE);
+ memcpy(req->result, fbuf, SHA3_384_DIGEST_SIZE);
memzero_explicit(fbuf, SHA3_384_DIGEST_SIZE);
return ret;
}
-static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
+static int handle_zynqmp_sha_engine_req(struct crypto_engine *engine, void *req)
{
- scoped_guard(spinlock_bh, &zynqmp_sha_lock)
- return __zynqmp_sha_digest(desc, data, len, out);
+ int err;
+
+ err = zynqmp_sha_digest(req);
+ local_bh_disable();
+ crypto_finalize_hash_request(engine, req, err);
+ local_bh_enable();
+
+ return 0;
}
static struct zynqmp_sha_drv_ctx zynqmp_sha3_drv_ctx = {
- .sha3_384 = {
+ .sha3_384.base = {
.init = zynqmp_sha_init,
.update = zynqmp_sha_update,
+ .final = zynqmp_sha_final,
.finup = zynqmp_sha_finup,
- .digest = zynqmp_sha_digest,
- .init_tfm = zynqmp_sha_init_tfm,
- .exit_tfm = zynqmp_sha_exit_tfm,
- .descsize = SHA3_384_EXPORT_SIZE,
- .digestsize = SHA3_384_DIGEST_SIZE,
- .base = {
- .cra_name = "sha3-384",
- .cra_driver_name = "zynqmp-sha3-384",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA3_384_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
- .cra_module = THIS_MODULE,
+ .digest = sha_digest,
+ .export = zynqmp_sha_export,
+ .import = zynqmp_sha_import,
+ .halg = {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .statesize = sizeof(struct sha3_state),
+ .base.cra_init = zynqmp_sha_init_tfm,
+ .base.cra_exit = zynqmp_sha_exit_tfm,
+ .base.cra_name = "sha3-384",
+ .base.cra_driver_name = "zynqmp-sha3-384",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
}
- }
+ },
+ .sha3_384.op = {
+ .do_one_request = handle_zynqmp_sha_engine_req,
+ },
};
+
+
static struct xlnx_feature sha_feature_map[] = {
{
.family = PM_ZYNQMP_FAMILY_CODE,
@@ -227,14 +294,30 @@ static int zynqmp_sha_probe(struct platform_device *pdev)
goto err_mem;
}
- err = crypto_register_shash(&sha3_drv_ctx->sha3_384);
+ sha3_drv_ctx->engine = crypto_engine_alloc_init(dev, 1);
+ if (!sha3_drv_ctx->engine) {
+ dev_err(dev, "Cannot alloc Crypto engine\n");
+ err = -ENOMEM;
+ goto err_engine;
+ }
+
+ err = crypto_engine_start(sha3_drv_ctx->engine);
+ if (err) {
+ dev_err(dev, "Cannot start AES engine\n");
+ goto err_start;
+ }
+
+ err = crypto_engine_register_ahash(&sha3_drv_ctx->sha3_384);
if (err < 0) {
- dev_err(dev, "Failed to register shash alg.\n");
- goto err_mem1;
+ dev_err(dev, "Failed to register sha3 alg.\n");
+ goto err_start;
}
+
return 0;
-err_mem1:
+err_start:
+ crypto_engine_exit(sha3_drv_ctx->engine);
+err_engine:
dma_free_coherent(dev, SHA3_384_DIGEST_SIZE, fbuf, final_dma_addr);
err_mem:
@@ -248,9 +331,10 @@ static void zynqmp_sha_remove(struct platform_device *pdev)
struct zynqmp_sha_drv_ctx *sha3_drv_ctx;
sha3_drv_ctx = platform_get_drvdata(pdev);
+ crypto_engine_unregister_ahash(&sha3_drv_ctx->sha3_384);
+ crypto_engine_exit(sha3_drv_ctx->engine);
dma_free_coherent(sha3_drv_ctx->dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
dma_free_coherent(sha3_drv_ctx->dev, SHA3_384_DIGEST_SIZE, fbuf, final_dma_addr);
- crypto_unregister_shash(&sha3_drv_ctx->sha3_384);
}
static struct platform_driver zynqmp_sha_driver = {
--
2.34.1
^ permalink raw reply related [flat|nested] 9+ messages in thread* Re: [PATCH 2/6] crypto: zynqmp-sha: Change algo type from shash to ahash
2026-03-03 7:19 ` [PATCH 2/6] crypto: zynqmp-sha: Change algo type from shash to ahash Harsh Jain
@ 2026-03-14 4:43 ` Herbert Xu
2026-03-14 4:46 ` Herbert Xu
1 sibling, 0 replies; 9+ messages in thread
From: Herbert Xu @ 2026-03-14 4:43 UTC (permalink / raw)
To: Harsh Jain
Cc: davem, linux-crypto, mounika.botcha, sarat.chand.savitala,
michal.simek
On Tue, Mar 03, 2026 at 12:49:49PM +0530, Harsh Jain wrote:
>
> -static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
> +static int zynqmp_sha_init_tfm(struct crypto_tfm *tfm)
Please don't use the obsolete cra_init functions. You should
use the ahash-specific init_tfm hook instead.
Thanks,
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 2/6] crypto: zynqmp-sha: Change algo type from shash to ahash
2026-03-03 7:19 ` [PATCH 2/6] crypto: zynqmp-sha: Change algo type from shash to ahash Harsh Jain
2026-03-14 4:43 ` Herbert Xu
@ 2026-03-14 4:46 ` Herbert Xu
1 sibling, 0 replies; 9+ messages in thread
From: Herbert Xu @ 2026-03-14 4:46 UTC (permalink / raw)
To: Harsh Jain
Cc: davem, linux-crypto, mounika.botcha, sarat.chand.savitala,
michal.simek
On Tue, Mar 03, 2026 at 12:49:49PM +0530, Harsh Jain wrote:
>
> +struct zynqmp_sha_desc_ctx {
> + struct ahash_request fallback_req;
Please use HASH_FBREQ_ON_STACK instead of rolling your own fallback.
See drivers/crypto/aspeed for an example.
Cheers,
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH 3/6] crypto: zynqmp-sha: Replace zynqmp prefix with xilinx
2026-03-03 7:19 [PATCH 0/6] crypto: zynqmp-aes-gcm: Bug fixes and sha3-384 support Harsh Jain
2026-03-03 7:19 ` [PATCH 1/6] crypto: xilinx: zynqmp-sha: Update the driver to make it self-discoverable Harsh Jain
2026-03-03 7:19 ` [PATCH 2/6] crypto: zynqmp-sha: Change algo type from shash to ahash Harsh Jain
@ 2026-03-03 7:19 ` Harsh Jain
2026-03-03 7:19 ` [PATCH 4/6] firmware: xilinx: Add firmware API's to support sha3-384 in Versal device Harsh Jain
` (2 subsequent siblings)
5 siblings, 0 replies; 9+ messages in thread
From: Harsh Jain @ 2026-03-03 7:19 UTC (permalink / raw)
To: herbert, davem, linux-crypto, mounika.botcha,
sarat.chand.savitala, michal.simek
Cc: Harsh Jain
Prepare for Versal sha3-384 support by renaming symbols from
zynqmp to xilinx.
Signed-off-by: Harsh Jain <h.jain@amd.com>
---
drivers/crypto/xilinx/zynqmp-sha.c | 54 +++++++++++++++---------------
1 file changed, 27 insertions(+), 27 deletions(-)
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index cd951e692dd9..74e95df5eefc 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -27,18 +27,18 @@ enum zynqmp_sha_op {
ZYNQMP_SHA3_FINAL = 4,
};
-struct zynqmp_sha_drv_ctx {
+struct xilinx_sha_drv_ctx {
struct ahash_engine_alg sha3_384;
struct crypto_engine *engine;
struct device *dev;
};
-struct zynqmp_sha_tfm_ctx {
+struct xilinx_sha_tfm_ctx {
struct device *dev;
struct crypto_ahash *fbk_tfm;
};
-struct zynqmp_sha_desc_ctx {
+struct xilinx_sha_desc_ctx {
struct ahash_request fallback_req;
};
@@ -48,12 +48,12 @@ static char *ubuf, *fbuf;
static int zynqmp_sha_init_tfm(struct crypto_tfm *tfm)
{
const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
+ struct xilinx_sha_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
struct hash_alg_common *alg = crypto_hash_alg_common(__crypto_ahash_cast(tfm));
struct crypto_ahash *fallback_tfm;
- struct zynqmp_sha_drv_ctx *drv_ctx;
+ struct xilinx_sha_drv_ctx *drv_ctx;
- drv_ctx = container_of(alg, struct zynqmp_sha_drv_ctx, sha3_384.base.halg);
+ drv_ctx = container_of(alg, struct xilinx_sha_drv_ctx, sha3_384.base.halg);
tfm_ctx->dev = drv_ctx->dev;
/* Allocate a fallback and abort if it failed. */
@@ -67,28 +67,28 @@ static int zynqmp_sha_init_tfm(struct crypto_tfm *tfm)
crypto_ahash_statesize(fallback_tfm));
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
crypto_ahash_reqsize(tfm_ctx->fbk_tfm) +
- sizeof(struct zynqmp_sha_desc_ctx));
+ sizeof(struct xilinx_sha_desc_ctx));
return 0;
}
static void zynqmp_sha_exit_tfm(struct crypto_tfm *tfm)
{
- struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
+ struct xilinx_sha_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
if (tfm_ctx->fbk_tfm) {
crypto_free_ahash(tfm_ctx->fbk_tfm);
tfm_ctx->fbk_tfm = NULL;
}
- memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx));
+ memzero_explicit(tfm_ctx, sizeof(struct xilinx_sha_tfm_ctx));
}
static int zynqmp_sha_init(struct ahash_request *req)
{
- struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct xilinx_sha_desc_ctx *dctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct xilinx_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
dctx->fallback_req.base.flags = req->base.flags &
@@ -98,9 +98,9 @@ static int zynqmp_sha_init(struct ahash_request *req)
static int zynqmp_sha_update(struct ahash_request *req)
{
- struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct xilinx_sha_desc_ctx *dctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct xilinx_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
dctx->fallback_req.base.flags = req->base.flags &
@@ -112,9 +112,9 @@ static int zynqmp_sha_update(struct ahash_request *req)
static int zynqmp_sha_final(struct ahash_request *req)
{
- struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct xilinx_sha_desc_ctx *dctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct xilinx_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
dctx->fallback_req.base.flags = req->base.flags &
@@ -125,9 +125,9 @@ static int zynqmp_sha_final(struct ahash_request *req)
static int zynqmp_sha_finup(struct ahash_request *req)
{
- struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct xilinx_sha_desc_ctx *dctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct xilinx_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
dctx->fallback_req.base.flags = req->base.flags &
@@ -142,9 +142,9 @@ static int zynqmp_sha_finup(struct ahash_request *req)
static int zynqmp_sha_import(struct ahash_request *req, const void *in)
{
- struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct xilinx_sha_desc_ctx *dctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct xilinx_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
dctx->fallback_req.base.flags = req->base.flags &
@@ -154,9 +154,9 @@ static int zynqmp_sha_import(struct ahash_request *req, const void *in)
static int zynqmp_sha_export(struct ahash_request *req, void *out)
{
- struct zynqmp_sha_desc_ctx *dctx = ahash_request_ctx(req);
+ struct xilinx_sha_desc_ctx *dctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct zynqmp_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct xilinx_sha_tfm_ctx *tctx = crypto_ahash_ctx(tfm);
ahash_request_set_tfm(&dctx->fallback_req, tctx->fbk_tfm);
dctx->fallback_req.base.flags = req->base.flags &
@@ -168,9 +168,9 @@ static int sha_digest(struct ahash_request *req)
{
struct crypto_tfm *tfm = crypto_ahash_tfm(crypto_ahash_reqtfm(req));
struct hash_alg_common *alg = crypto_hash_alg_common(__crypto_ahash_cast(tfm));
- struct zynqmp_sha_drv_ctx *drv_ctx;
+ struct xilinx_sha_drv_ctx *drv_ctx;
- drv_ctx = container_of(alg, struct zynqmp_sha_drv_ctx, sha3_384.base.halg);
+ drv_ctx = container_of(alg, struct xilinx_sha_drv_ctx, sha3_384.base.halg);
return crypto_transfer_hash_request_to_engine(drv_ctx->engine, req);
}
@@ -220,7 +220,7 @@ static int handle_zynqmp_sha_engine_req(struct crypto_engine *engine, void *req)
return 0;
}
-static struct zynqmp_sha_drv_ctx zynqmp_sha3_drv_ctx = {
+static struct xilinx_sha_drv_ctx zynqmp_sha3_drv_ctx = {
.sha3_384.base = {
.init = zynqmp_sha_init,
.update = zynqmp_sha_update,
@@ -241,7 +241,7 @@ static struct zynqmp_sha_drv_ctx zynqmp_sha3_drv_ctx = {
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = SHA3_384_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
+ .base.cra_ctxsize = sizeof(struct xilinx_sha_tfm_ctx),
.base.cra_module = THIS_MODULE,
}
},
@@ -262,7 +262,7 @@ static struct xlnx_feature sha_feature_map[] = {
static int zynqmp_sha_probe(struct platform_device *pdev)
{
- struct zynqmp_sha_drv_ctx *sha3_drv_ctx;
+ struct xilinx_sha_drv_ctx *sha3_drv_ctx;
struct device *dev = &pdev->dev;
int err;
@@ -328,7 +328,7 @@ static int zynqmp_sha_probe(struct platform_device *pdev)
static void zynqmp_sha_remove(struct platform_device *pdev)
{
- struct zynqmp_sha_drv_ctx *sha3_drv_ctx;
+ struct xilinx_sha_drv_ctx *sha3_drv_ctx;
sha3_drv_ctx = platform_get_drvdata(pdev);
crypto_engine_unregister_ahash(&sha3_drv_ctx->sha3_384);
--
2.34.1
^ permalink raw reply related [flat|nested] 9+ messages in thread* [PATCH 4/6] firmware: xilinx: Add firmware API's to support sha3-384 in Versal device
2026-03-03 7:19 [PATCH 0/6] crypto: zynqmp-aes-gcm: Bug fixes and sha3-384 support Harsh Jain
` (2 preceding siblings ...)
2026-03-03 7:19 ` [PATCH 3/6] crypto: zynqmp-sha: Replace zynqmp prefix with xilinx Harsh Jain
@ 2026-03-03 7:19 ` Harsh Jain
2026-03-03 7:19 ` [PATCH 5/6] crypto: zynqmp-sha: Save dma bit mask value in driver context Harsh Jain
2026-03-03 7:19 ` [PATCH 6/6] crypto: zynqmp-sha: Add sha3-384 support for AMD/Xilinx Versal device Harsh Jain
5 siblings, 0 replies; 9+ messages in thread
From: Harsh Jain @ 2026-03-03 7:19 UTC (permalink / raw)
To: herbert, davem, linux-crypto, mounika.botcha,
sarat.chand.savitala, michal.simek
Cc: Harsh Jain
From: Mounika Botcha <mounika.botcha@amd.com>
Add sha3-384 crypto APIs for the AMD/Xilinx Versal device.
Signed-off-by: Mounika Botcha <mounika.botcha@amd.com>
Signed-off-by: Harsh Jain <h.jain@amd.com>
---
drivers/firmware/xilinx/zynqmp-crypto.c | 24 +++++++++++++++++++++
include/linux/firmware/xlnx-zynqmp-crypto.h | 8 +++++++
2 files changed, 32 insertions(+)
diff --git a/drivers/firmware/xilinx/zynqmp-crypto.c b/drivers/firmware/xilinx/zynqmp-crypto.c
index f06f1e2f67b8..caa3ee8c6e2a 100644
--- a/drivers/firmware/xilinx/zynqmp-crypto.c
+++ b/drivers/firmware/xilinx/zynqmp-crypto.c
@@ -236,3 +236,27 @@ int versal_pm_aes_init(void)
return zynqmp_pm_invoke_fn(XSECURE_API_AES_INIT, NULL, 0);
}
EXPORT_SYMBOL_GPL(versal_pm_aes_init);
+
+/**
+ * versal_pm_sha_hash - Access the SHA engine to calculate the hash
+ * @src: Address of the data
+ * @dst: Address of the output buffer
+ * @size: Size of the data.
+ * @out_status: Returned output value
+ *
+ * Return: Returns status, either success or error code.
+ */
+int versal_pm_sha_hash(const u64 src, const u64 dst, const u32 size, u32 *out_status)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!out_status)
+ return -EINVAL;
+ ret = zynqmp_pm_invoke_fn(XSECURE_API_SHA3_UPDATE, ret_payload, 5,
+ lower_32_bits(src), upper_32_bits(src),
+ size, lower_32_bits(dst), upper_32_bits(dst));
+ *out_status = ret_payload[0];
+ return ret;
+}
+EXPORT_SYMBOL_GPL(versal_pm_sha_hash);
diff --git a/include/linux/firmware/xlnx-zynqmp-crypto.h b/include/linux/firmware/xlnx-zynqmp-crypto.h
index 56595ab37c43..c93b77a2c084 100644
--- a/include/linux/firmware/xlnx-zynqmp-crypto.h
+++ b/include/linux/firmware/xlnx-zynqmp-crypto.h
@@ -33,6 +33,8 @@ struct xlnx_feature {
#define XSECURE_API_AES_KEY_ZERO 0x510
#define XSECURE_API_AES_WRITE_KEY 0x511
+#define XSECURE_API_SHA3_UPDATE 0x504
+
#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
int zynqmp_pm_aes_engine(const u64 address, u32 *out);
int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
@@ -47,6 +49,7 @@ int versal_pm_aes_dec_update(const u64 in_params, const u64 in_addr);
int versal_pm_aes_dec_final(const u64 gcm_addr);
int versal_pm_aes_enc_final(const u64 gcm_addr);
int versal_pm_aes_init(void);
+int versal_pm_sha_hash(const u64 src, const u64 dst, const u32 size, u32 *out_status);
#else
static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
@@ -114,6 +117,11 @@ static inline int versal_pm_aes_init(void)
return -ENODEV;
}
+static inline int versal_pm_sha_hash(const u64 src, const u64 dst, const u32 size, u32 *out_status)
+{
+ return -ENODEV;
+}
+
#endif
#endif /* __FIRMWARE_XLNX_ZYNQMP_CRYPTO_H__ */
--
2.34.1
^ permalink raw reply related [flat|nested] 9+ messages in thread* [PATCH 5/6] crypto: zynqmp-sha: Save dma bit mask value in driver context
2026-03-03 7:19 [PATCH 0/6] crypto: zynqmp-aes-gcm: Bug fixes and sha3-384 support Harsh Jain
` (3 preceding siblings ...)
2026-03-03 7:19 ` [PATCH 4/6] firmware: xilinx: Add firmware API's to support sha3-384 in Versal device Harsh Jain
@ 2026-03-03 7:19 ` Harsh Jain
2026-03-03 7:19 ` [PATCH 6/6] crypto: zynqmp-sha: Add sha3-384 support for AMD/Xilinx Versal device Harsh Jain
5 siblings, 0 replies; 9+ messages in thread
From: Harsh Jain @ 2026-03-03 7:19 UTC (permalink / raw)
To: herbert, davem, linux-crypto, mounika.botcha,
sarat.chand.savitala, michal.simek
Cc: Harsh Jain
Save the DMA mask in the driver context. This will allow the upcoming
Versal sha3-384 support to use a different value.
Signed-off-by: Harsh Jain <h.jain@amd.com>
---
drivers/crypto/xilinx/zynqmp-sha.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 74e95df5eefc..72b405758200 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -31,6 +31,7 @@ struct xilinx_sha_drv_ctx {
struct ahash_engine_alg sha3_384;
struct crypto_engine *engine;
struct device *dev;
+ u8 dma_addr_size;
};
struct xilinx_sha_tfm_ctx {
@@ -248,6 +249,7 @@ static struct xilinx_sha_drv_ctx zynqmp_sha3_drv_ctx = {
.sha3_384.op = {
.do_one_request = handle_zynqmp_sha_engine_req,
},
+ .dma_addr_size = ZYNQMP_DMA_BIT_MASK,
};
@@ -273,7 +275,7 @@ static int zynqmp_sha_probe(struct platform_device *pdev)
return PTR_ERR(sha3_drv_ctx);
}
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(sha3_drv_ctx->dma_addr_size));
if (err < 0) {
dev_err(dev, "No usable DMA configuration\n");
return err;
--
2.34.1
^ permalink raw reply related [flat|nested] 9+ messages in thread* [PATCH 6/6] crypto: zynqmp-sha: Add sha3-384 support for AMD/Xilinx Versal device
2026-03-03 7:19 [PATCH 0/6] crypto: zynqmp-aes-gcm: Bug fixes and sha3-384 support Harsh Jain
` (4 preceding siblings ...)
2026-03-03 7:19 ` [PATCH 5/6] crypto: zynqmp-sha: Save dma bit mask value in driver context Harsh Jain
@ 2026-03-03 7:19 ` Harsh Jain
5 siblings, 0 replies; 9+ messages in thread
From: Harsh Jain @ 2026-03-03 7:19 UTC (permalink / raw)
To: herbert, davem, linux-crypto, mounika.botcha,
sarat.chand.savitala, michal.simek
Cc: Harsh Jain
Add SHA3 driver support for the Xilinx Versal SoC.
The Versal SoC SHA3 engine does not support context export and therefore
cannot handle parallel requests. For unsupported cases the driver uses a
software fallback.
For digest requests, the SHA3 hash is calculated by the hardened
SHA3 accelerator in Versal.
Signed-off-by: Harsh Jain <h.jain@amd.com>
---
drivers/crypto/xilinx/zynqmp-sha.c | 119 ++++++++++++++++++++++++++++-
1 file changed, 118 insertions(+), 1 deletion(-)
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 72b405758200..b418c917ab02 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -18,8 +18,17 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#define CONTINUE_PACKET BIT(31)
+#define FIRST_PACKET BIT(30)
+#define FINAL_PACKET 0
+#define RESET 0
#define ZYNQMP_DMA_BIT_MASK 32U
+#define VERSAL_DMA_BIT_MASK 64U
#define ZYNQMP_DMA_ALLOC_FIXED_SIZE 0x1000U
+#define VERSAL_SHA3_INVALID_PARAM 0x08U
+#define VERSAL_SHA3_STATE_MISMATCH_ERROR 0x0AU
+#define VERSAL_SHA3_FINISH_ERROR 0x07U
+#define VERSAL_SHA3_PMC_DMA_UPDATE_ERROR 0x04U
enum zynqmp_sha_op {
ZYNQMP_SHA3_INIT = 1,
@@ -209,6 +218,67 @@ static int zynqmp_sha_digest(struct ahash_request *req)
return ret;
}
+static int versal_sha_fw_error_decode(int status)
+{
+ switch (status) {
+ case VERSAL_SHA3_INVALID_PARAM:
+ pr_err("ERROR: On invalid parameter\n");
+ return -EINVAL;
+ case VERSAL_SHA3_STATE_MISMATCH_ERROR:
+ pr_err("ERROR: SHA3 state mismatch error\n");
+ return -EINVAL;
+ case VERSAL_SHA3_FINISH_ERROR:
+ pr_err("ERROR: SHA3 finish error\n");
+ return -EIO;
+ case VERSAL_SHA3_PMC_DMA_UPDATE_ERROR:
+ pr_err("ERROR: SHA3 PMC DMA update error\n");
+ return -EIO;
+ default:
+ pr_err("ERROR: Unknown SHA3 FW error code: %u\n", status);
+ return -EIO;
+ }
+}
+
+static int versal_sha_digest(struct ahash_request *req)
+{
+ int update_size, ret, flag = FIRST_PACKET;
+ unsigned int processed = 0;
+ unsigned int remaining_len;
+ unsigned int fw_status = 0;
+
+ remaining_len = req->nbytes;
+ while (remaining_len) {
+ if (remaining_len >= ZYNQMP_DMA_ALLOC_FIXED_SIZE)
+ update_size = ZYNQMP_DMA_ALLOC_FIXED_SIZE;
+ else
+ update_size = remaining_len;
+
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), ubuf, update_size, processed);
+ flush_icache_range((unsigned long)ubuf,
+ (unsigned long)ubuf + update_size);
+
+ flag |= CONTINUE_PACKET;
+ ret = versal_pm_sha_hash(update_dma_addr, 0,
+ update_size | flag, &fw_status);
+ if (ret)
+ return versal_sha_fw_error_decode(fw_status);
+
+ remaining_len -= update_size;
+ processed += update_size;
+ flag = RESET;
+ }
+
+ flag |= FINAL_PACKET;
+ ret = versal_pm_sha_hash(0, final_dma_addr, flag, &fw_status);
+ if (ret)
+ return versal_sha_fw_error_decode(fw_status);
+
+ memcpy(req->result, fbuf, SHA3_384_DIGEST_SIZE);
+ memzero_explicit(fbuf, SHA3_384_DIGEST_SIZE);
+
+ return 0;
+}
+
static int handle_zynqmp_sha_engine_req(struct crypto_engine *engine, void *req)
{
int err;
@@ -221,6 +291,18 @@ static int handle_zynqmp_sha_engine_req(struct crypto_engine *engine, void *req)
return 0;
}
+static int handle_versal_sha_engine_req(struct crypto_engine *engine, void *req)
+{
+ int err;
+
+ err = versal_sha_digest(req);
+ local_bh_disable();
+ crypto_finalize_hash_request(engine, req, err);
+ local_bh_enable();
+
+ return 0;
+}
+
static struct xilinx_sha_drv_ctx zynqmp_sha3_drv_ctx = {
.sha3_384.base = {
.init = zynqmp_sha_init,
@@ -252,7 +334,36 @@ static struct xilinx_sha_drv_ctx zynqmp_sha3_drv_ctx = {
.dma_addr_size = ZYNQMP_DMA_BIT_MASK,
};
-
+static struct xilinx_sha_drv_ctx versal_sha3_drv_ctx = {
+ .sha3_384.base = {
+ .init = zynqmp_sha_init,
+ .update = zynqmp_sha_update,
+ .final = zynqmp_sha_final,
+ .finup = zynqmp_sha_finup,
+ .digest = sha_digest,
+ .export = zynqmp_sha_export,
+ .import = zynqmp_sha_import,
+ .halg = {
+ .base.cra_init = zynqmp_sha_init_tfm,
+ .base.cra_exit = zynqmp_sha_exit_tfm,
+ .base.cra_name = "sha3-384",
+ .base.cra_driver_name = "versal-sha3-384",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct xilinx_sha_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .statesize = sizeof(struct sha3_state),
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ }
+ },
+ .sha3_384.op = {
+ .do_one_request = handle_versal_sha_engine_req,
+ },
+ .dma_addr_size = VERSAL_DMA_BIT_MASK,
+};
static struct xlnx_feature sha_feature_map[] = {
{
@@ -260,6 +371,12 @@ static struct xlnx_feature sha_feature_map[] = {
.feature_id = PM_SECURE_SHA,
.data = &zynqmp_sha3_drv_ctx,
},
+ {
+ .family = PM_VERSAL_FAMILY_CODE,
+ .feature_id = XSECURE_API_SHA3_UPDATE,
+ .data = &versal_sha3_drv_ctx,
+ },
+ { /* sentinel */ }
};
static int zynqmp_sha_probe(struct platform_device *pdev)
--
2.34.1
^ permalink raw reply related [flat|nested] 9+ messages in thread